repo_name | path | copies | size | content | license
---|---|---|---|---|---|
samuel1208/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
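# A minimal sketch (illustrative, not part of the original test suite; the
# leading underscore keeps it out of test collection): empirical_covariance
# returns the maximum-likelihood estimate, i.e. the centered outer product
# averaged with denominator n_samples rather than n_samples - 1.
def _sketch_empirical_covariance_formula():
    X_centered = X - X.mean(axis=0)
    expected = np.dot(X_centered.T, X_centered) / n_samples
    assert_array_almost_equal(empirical_covariance(X), expected)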
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
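# A minimal sketch (illustrative, not part of the original test suite):
# shrunk_covariance blends the empirical covariance with a scaled identity,
# where mu is the mean of the empirical variances (trace / n_features).
def _sketch_shrunk_covariance_formula(shrinkage=0.5):
    emp_cov = empirical_covariance(X)
    mu = np.trace(emp_cov) / n_features
    expected = (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)
    assert_array_almost_equal(shrunk_covariance(emp_cov, shrinkage), expected)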
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
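# Note: ledoit_wolf(X) returns the pair (shrunk covariance, shrinkage), with the
# shrinkage coefficient computed analytically from the data (Ledoit & Wolf);
# the estimate itself is the same convex combination checked above through
# ShrunkCovariance(shrinkage=lw.shrinkage_).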
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
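# Note: oas(X) likewise returns (shrunk covariance, shrinkage); the OAS formula
# of Chen et al. targets the same scaled-identity matrix, which is why the
# ShrunkCovariance comparison above holds for it as well.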
| bsd-3-clause |
e-koch/FilFinder | examples/paper_figures/run_gouldbelt.py | 3 | 6521 | # Licensed under an MIT open source license - see LICENSE
'''
Script to run fil_finder on the Herschel Gould Belt data set.
Can be run on multiple cores.
Data can be downloaded at http://www.herschel.fr/cea/gouldbelt/en/Phocea/Vie_des_labos/Ast/ast_visu.php?id_ast=66.
'''
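# Usage sketch (an assumption about how the script is meant to be invoked, based
# on the prompts and paths below): run ``python run_gouldbelt.py`` from this
# directory and answer the multi-core prompt; the FITS files listed in fits250 /
# fits350 are read from one directory up ("../" + filename in wrapper).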
from fil_finder import *
from astropy.io import fits
from astropy import convolution
import os
from shutil import move
from datetime import datetime
from pandas import DataFrame
from scipy.ndimage import zoom
# Attempt at multi-core implementation
def wrapper(filename, distance, beamwidth, offset, verbose=False):
print "Running " + filename + " at " + str(datetime.now())
# hdu = fits.open(filename)
# img = hdu[1].data
# hdr = hdu[1].data
img, hdr = fits.getdata("../" + filename, header=True)
img = img + offset
if not os.path.exists(filename[:-5]):
os.makedirs(filename[:-5])
# Convolve to the distance of IC-5146 (460 pc)
convolve_to_common = True
regrid_to_common = True
if convolve_to_common:
r = 460. / float(distance)
if r != 1.:
conv = np.sqrt(r ** 2. - 1) * \
(beamwidth / np.sqrt(8*np.log(2)) / (np.abs(hdr["CDELT2"]) * 3600.))
if conv > 1.5:
kernel = convolution.Gaussian2DKernel(conv)
good_pixels = np.isfinite(img)
nan_pix = np.ones(img.shape)
nan_pix[good_pixels == 0] = np.NaN
img = convolution.convolve(img, kernel, boundary='fill',
fill_value=np.NaN)
# Avoid edge effects from smoothing
img = img * nan_pix
beamwidth *= r
if regrid_to_common:
# Regrid to nearest distance, which for this data set is Taurus at 140 pc
r = float(distance) / 140.
if r != 1:
good_pixels = np.isfinite(img)
good_pixels = zoom(good_pixels, round(r, 3),
order=0)
img[np.isnan(img)] = 0.0
regrid_conv_img = zoom(img, round(r, 3))
nan_pix = np.ones(regrid_conv_img.shape)
nan_pix[good_pixels == 0] = np.NaN
img = regrid_conv_img * nan_pix
distance = 140.
hdr['CDELT2'] /= r
# Toggle saving of the exact maps used in the algorithm
save_regrid_convolve = True
if save_regrid_convolve:
hdr['NAXIS1'] = img.shape[1]
hdr['NAXIS2'] = img.shape[0]
hdu = fits.PrimaryHDU(img.astype(">f4"), header=hdr)
hdu.writeto(filename[:-5]+"/"+filename[:-5]+"_regrid_convolved.fits")
print filename, distance
filfind = fil_finder_2D(img, hdr, beamwidth,
distance=distance, glob_thresh=20)
print filfind.beamwidth, filfind.imgscale
save_name = filename[:-5]
filfind.create_mask()
filfind.medskel(verbose=verbose)
filfind.analyze_skeletons()
filfind.compute_filament_brightness()
filfind.exec_rht(branches=True)
# Save the branches output separately
for i in range(len(filfind.rht_curvature["Median"])):
vals = np.vstack([filfind.rht_curvature[key][i] for key in filfind.rht_curvature.keys()]).T
if i == 0:
branches_rht = vals
else:
branches_rht = np.vstack((branches_rht, vals))
df = DataFrame(branches_rht, columns=filfind.rht_curvature.keys())
df.to_csv(filename[:-5] + "_rht_branches.csv")
move(filename[:-5] + "_rht_branches.csv", filename[:-5])
filfind.exec_rht(branches=False)
filfind.find_widths(verbose=verbose)
filfind.save_table(save_name=save_name, table_type="fits")
# filfind.save_table(save_name=save_name, table_type="csv")
filfind.save_fits(save_name=save_name, stamps=False)
try:
move(filename[:-5] + "_table.fits", filename[:-5])
except:
pass
# Move the stamps folder
try:
move("stamps_" + filename[:-5], filename[:-5])
except:
pass
move(filename[:-5] + "_mask.fits", filename[:-5])
move(filename[:-5] + "_skeletons.fits", filename[:-5])
move(filename[:-5] + "_filament_model.fits", filename[:-5])
del filfind, img, hdr
def single_input(a):
return wrapper(*a)
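# Note: Pool.map passes a single argument to its callable, so the per-file
# parameters are zipped into tuples below and unpacked here via wrapper(*a).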
if __name__ == "__main__":
# from multiprocessing import Pool
from interruptible_pool import InterruptiblePool as Pool
from itertools import izip
MULTICORE = bool(raw_input("Run on multiple cores? (T or blank): "))
if MULTICORE:
NCORES = int(raw_input("How many cores to use? "))
# os.chdir("/srv/astro/erickoch/gould_belt/degrade_all/")
fits250 = ["pipeCenterB59-250.fits", "lupusI-250.fits", "aquilaM2-250.fits", "orionB-250.fits", "polaris-250.fits",
"chamaeleonI-250_normed.fits", "perseus04-250.fits", "taurusN3-250.fits", "ic5146-250.fits",
"orionA-C-250.fits", "orionA-S-250.fits", "california_cntr-250_normed.fits", "california_east-250_normed.fits",
"california_west-250_normed.fits"]
fits350 = ["pipeCenterB59-350.fits", "lupusI-350.fits", "aquilaM2-350.fits", "orionB-350.fits", "polaris-350.fits",
"chamaeleonI-350.fits", "perseus04-350.fits", "taurusN3-350.fits", "ic5146-350.fits",
"orionA-C-350.fits", "orionA-S-350.fits", "california_cntr-350.fits", "california_east-350.fits",
"california_west-350.fits"]
distances = [145., 150., 260., 400., 150., 170., 235.,
140., 460., 400., 400., 450., 450., 450.] # pc
offsets = [31.697, 14.437, 85.452, 26.216, 9.330, -879.063, 23.698,
21.273, 20.728, 32.616, 35.219, 9.005, 10.124, 14.678]
beamwidth_250 = [18.2] * len(fits250)
beamwidth_350 = [24.9] * len(fits350)
# Inputs (adjust to desired wavelength)
beamwidths = beamwidth_350 # + beamwidth_350
distances = distances # + distances
fits_files = fits350 # + fits350
print "Started at " + str(datetime.now())
if not MULTICORE:
for i, filename in enumerate(fits_files):
wrapper(filename, distances[i], beamwidths[i], offsets[i], verbose=False)
else:
pool = Pool(processes=NCORES)
pool.map(single_input, izip(fits_files, distances, beamwidths, offsets))
pool.close()
# pool.join()
| mit |
hrjn/scikit-learn | sklearn/utils/estimator_checks.py | 16 | 64623 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
from scipy.stats import rankdata
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import SkipTestWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.utils.validation import has_fit_parameter
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_sample_weights_pandas_series
yield check_sample_weights_list
yield check_estimators_fit_returns_self
    # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in
["MultinomialNB", "LabelPropagation", "LabelSpreading"] and
# TODO some complication with -1 label
name not in ["DecisionTreeClassifier", "ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
yield check_non_transformer_estimators_n_iter
# test if predict_proba is a monotonic transformation of decision_function
yield check_decision_proba_consistency
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator targets are not NaN.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message"
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
yield check_non_transformer_estimators_n_iter
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
yield check_non_transformer_estimators_n_iter
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
yield check_get_params_invariance
yield check_dict_unchanged
yield check_no_fit_attributes_set_in_init
yield check_dont_overwrite_parameters
def check_estimator(Estimator):
"""Check if estimator adheres to scikit-learn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
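
    Examples
    --------
    Illustrative usage (a sketch; the estimator class here is only an
    example, not something this module provides)::

        from sklearn.linear_model import LogisticRegression
        from sklearn.utils.estimator_checks import check_estimator
        check_estimator(LogisticRegression)  # raises if any check fails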
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
try:
check(name, Estimator)
except SkipTest as message:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(message, SkipTestWarning)
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
        # which is more features than we have in most cases.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
with ignore_warnings(category=DeprecationWarning):
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_pandas_series(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type pandas.Series in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
try:
import pandas as pd
X = pd.DataFrame([[1, 1], [1, 2], [1, 3], [2, 1], [2, 2], [2, 3]])
y = pd.Series([1, 1, 1, 2, 2, 2])
weights = pd.Series([1] * 6)
try:
estimator.fit(X, y, sample_weight=weights)
except ValueError:
raise ValueError("Estimator {0} raises error if "
"'sample_weight' parameter is of "
"type pandas.Series".format(name))
except ImportError:
raise SkipTest("pandas is not installed: not testing for "
"input of type pandas.Series to class weight.")
@ignore_warnings(category=DeprecationWarning)
def check_sample_weights_list(name, Estimator):
# check that estimators will accept a 'sample_weight' parameter of
# type list in the 'fit' function.
estimator = Estimator()
if has_fit_parameter(estimator, "sample_weight"):
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
sample_weight = [3] * 10
# Test that estimators don't raise any exception
estimator.fit(X, y, sample_weight=sample_weight)
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_dict_unchanged(name, Estimator):
# this estimator raises
# ValueError: Found array with 0 feature(s) (shape=(23, 0))
# while a minimum of 1 is required.
# error
if name in ['SpectralCoclustering']:
return
rnd = np.random.RandomState(0)
if name in ['RANSACRegressor']:
X = 3 * rnd.uniform(size=(20, 3))
else:
X = 2 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
if hasattr(estimator, "n_best"):
estimator.n_best = 1
set_random_state(estimator, 1)
# should be just `estimator.fit(X, y)`
# after merging #6141
if name in ['SpectralBiclustering']:
estimator.fit(X)
else:
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
dict_before = estimator.__dict__.copy()
getattr(estimator, method)(X)
assert_dict_equal(estimator.__dict__, dict_before,
'Estimator changes __dict__ during %s' % method)
def is_public_parameter(attr):
return not (attr.startswith('_') or attr.endswith('_'))
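# e.g. is_public_parameter("alpha") is True, while fitted attributes such as
# "coef_" and private attributes such as "_tree" are not considered public.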
def check_dont_overwrite_parameters(name, Estimator):
# check that fit method only changes or sets private attributes
if hasattr(Estimator.__init__, "deprecated_original"):
# to not check deprecated classes
return
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
dict_before_fit = estimator.__dict__.copy()
estimator.fit(X, y)
dict_after_fit = estimator.__dict__
public_keys_after_fit = [key for key in dict_after_fit.keys()
if is_public_parameter(key)]
attrs_added_by_fit = [key for key in public_keys_after_fit
if key not in dict_before_fit.keys()]
# check that fit doesn't add any public attribute
assert_true(not attrs_added_by_fit,
('Estimator adds public attribute(s) during'
' the fit method.'
' Estimators are only allowed to add private attributes'
' either started with _ or ended'
' with _ but %s added' % ', '.join(attrs_added_by_fit)))
# check that fit doesn't change any public attribute
attrs_changed_by_fit = [key for key in public_keys_after_fit
if (dict_before_fit[key]
is not dict_after_fit[key])]
assert_true(not attrs_changed_by_fit,
('Estimator changes public attribute(s) during'
' the fit method. Estimators are only allowed'
' to change attributes started'
' or ended with _, but'
' %s changed' % ', '.join(attrs_changed_by_fit)))
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
assert_raise_message(ValueError, "Reshape your data",
getattr(estimator, method), X[0])
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting 1d array with 1 sample
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings(category=DeprecationWarning)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
@ignore_warnings(category=DeprecationWarning)
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with ignore_warnings(category=DeprecationWarning):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
methods = ["predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = ("0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* "
"is required.")
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    # Checks that the Estimator raises an error when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
    # some estimators can't handle negative feature values
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
if Estimator.__module__.startswith('sklearn.'):
assert_true(b"version" in pickled_estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features changes
    # between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with ignore_warnings(category=DeprecationWarning):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3 and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one decision_function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
if hasattr(classifier, "predict_log_proba"):
# predict_log_proba is a transformation of predict_proba
y_log_prob = classifier.predict_log_proba(X)
assert_array_almost_equal(y_log_prob, np.log(y_prob), 8)
assert_array_equal(np.argsort(y_log_prob), np.argsort(y_prob))
@ignore_warnings(category=DeprecationWarning)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
@ignore_warnings(category=DeprecationWarning)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
@ignore_warnings(category=DeprecationWarning)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
@ignore_warnings(category=DeprecationWarning)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with ignore_warnings(category=DeprecationWarning):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # low to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
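# Illustrative sketch, not part of the original check suite: the manual
# class_weight computed above matches what compute_class_weight('balanced',
# ...) returns, i.e. n_samples / (n_classes * per-class counts).
def _demo_balanced_class_weight():
    from sklearn.utils.class_weight import compute_class_weight
    y_demo = np.array([1, 1, 1, -1, -1])
    classes = np.unique(y_demo)                        # array([-1, 1])
    weights = compute_class_weight('balanced', classes=classes, y=y_demo)
    return dict(zip(classes, weights))                 # {-1: 1.25, 1: 0.833...}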
@ignore_warnings(category=DeprecationWarning)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
        assert_equal(hash(new_value), hash(original_value),
                     "Estimator %s should not change or mutate "
                     "the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
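# Minimal, hypothetical offender that check_estimators_overwrite_params would
# flag: a fit method that rewrites one of its own constructor parameters
# instead of storing derived state in a trailing-underscore attribute.
class _ParamMutatingEstimator(BaseEstimator):
    def __init__(self, alpha=1.0):
        self.alpha = alpha
    def fit(self, X, y=None):
        self.alpha = 2 * self.alpha  # mutates an __init__ parameter: bad
        return self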
def check_no_fit_attributes_set_in_init(name, Estimator):
"""Check that Estimator.__init__ doesn't set trailing-_ attributes."""
estimator = Estimator()
for attr in dir(estimator):
if attr.endswith("_") and not attr.startswith("__"):
# This check is for properties, they can be listed in dir
# while at the same time have hasattr return False as long
# as the property getter raises an AttributeError
assert_false(
hasattr(estimator, attr),
"By convention, attributes ending with '_' are "
'estimated from data in scikit-learn. Consequently they '
'should not be initialized in the constructor of an '
'estimator but in the fit method. Attribute {!r} '
'was found in estimator {}'.format(attr, name))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
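# Hedged usage sketch (assuming SGDClassifier keeps its sparsify API): this is
# the behaviour the check above exercises on real estimators.
def _demo_sparsify():
    from sklearn.linear_model import SGDClassifier
    X_demo = np.array([[-2., -1.], [-1., -1.], [1., 1.], [2., 1.]])
    y_demo = np.array([0, 0, 1, 1])
    clf = SGDClassifier(penalty="l1", random_state=0).fit(X_demo, y_demo)
    clf.sparsify()                       # coef_ becomes a scipy.sparse matrix
    return sparse.issparse(clf.coef_)    # True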
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with ignore_warnings(category=DeprecationWarning):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self' and
p.kind != p.VAR_KEYWORD and
p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
        # they may need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
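# For example, multioutput_estimator_convert_y_2d('MultiTaskElasticNet', y)
# reshapes y from (n_samples,) to (n_samples, 1); other names pass y through.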
@ignore_warnings(category=DeprecationWarning)
def check_non_transformer_estimators_n_iter(name, Estimator):
    # Test that non-transformer estimators with a max_iter parameter
    # expose an n_iter_ attribute of at least 1 after fitting.
    # The models below depend on external solvers such as libsvm, where
    # accessing the iteration count is non-trivial, so they are skipped.
not_run_check_n_iter = ['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV', 'LinearSVC',
'LogisticRegression']
# Tested in test_transformer_n_iter
not_run_check_n_iter += CROSS_DECOMPOSITION
if name in not_run_check_n_iter:
return
    # LassoLars stops early for the default alpha=1.0 on the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, 'max_iter'):
iris = load_iris()
X, y_ = iris.data, iris.target
y_ = multioutput_estimator_convert_y_2d(name, y_)
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
        # HuberRegressor depends on scipy.optimize.fmin_l_bfgs_b
        # which doesn't return an n_iter for old versions of SciPy.
if not (name == 'HuberRegressor' and estimator.n_iter_ is None):
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_transformer_n_iter(name, Estimator):
    # Test that transformers with a max_iter parameter expose an
    # n_iter_ attribute of at least 1 after fitting.
estimator = Estimator()
if hasattr(estimator, "max_iter"):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
        # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater_equal(iter_, 1)
else:
assert_greater_equal(estimator.n_iter_, 1)
@ignore_warnings(category=DeprecationWarning)
def check_get_params_invariance(name, estimator):
# Checks if get_params(deep=False) is a subset of get_params(deep=True)
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
def transform(self, X):
return X
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
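# Informal illustration of the invariance being checked (assumed Pipeline
# behaviour): for Pipeline([('clf', LogisticRegression())]),
# get_params(deep=False) only returns the outer constructor arguments
# (e.g. 'steps'), while get_params(deep=True) additionally exposes per-step
# entries such as 'clf' and 'clf__C' -- so the shallow items must always be a
# subset of the deep ones.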
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
@ignore_warnings(category=DeprecationWarning)
def check_decision_proba_consistency(name, Estimator):
# Check whether an estimator having both decision_function and
# predict_proba methods has outputs with perfect rank correlation.
centers = [(2, 2), (4, 4)]
X, y = make_blobs(n_samples=100, random_state=0, n_features=4,
centers=centers, cluster_std=1.0, shuffle=True)
X_test = np.random.randn(20, 2) + 4
estimator = Estimator()
set_testing_parameters(estimator)
if (hasattr(estimator, "decision_function") and
hasattr(estimator, "predict_proba")):
estimator.fit(X, y)
a = estimator.predict_proba(X_test)[:, 1]
b = estimator.decision_function(X_test)
assert_array_equal(rankdata(a), rankdata(b))
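# Sketch of the property asserted above (informal): for a binary classifier
# the two outputs must be monotonically related, so their rankings coincide:
#     a = estimator.predict_proba(X_test)[:, 1]
#     b = estimator.decision_function(X_test)
#     np.all(rankdata(a) == rankdata(b))   # True for consistent estimators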
| bsd-3-clause |
arokem/nipy | doc/conf.py | 5 | 6641 | # emacs: -*- coding: utf-8; mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# sampledoc documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 3 12:40:24 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# Get project related strings. Please do not change this line to use
# execfile because execfile is not available in Python 3
_info_fname = os.path.join('..', 'nipy', 'info.py')
rel = {}
exec(open(_info_fname, 'rt').read(), {}, rel)
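# After the exec above, rel maps every name defined in nipy/info.py;
# only rel['__version__'] is consumed below (for |version| and |release|).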
# Import support for ipython console session syntax highlighting (lives
# in the sphinxext directory defined above)
import ipython_console_highlighting
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.pngmath',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'numpy_ext.numpydoc',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives', # needed for above
]
# Autosummary on
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'nipy'
#copyright = ':ref:`2005-2010, Neuroimaging in Python team. <nipy-software-license>`'
copyright = '2005-2013, Neuroimaging in Python team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = rel['__version__']
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directories, that shouldn't
# be searched for source files.
# exclude_trees = []
# what to put into API doc (just class doc, just init, or both)
autoclass_content = 'class'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
#
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'nipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'NIPY Documentation'
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = project
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('documentation', 'nipy.tex', 'Neuroimaging in Python Documentation',
     u'Neuroimaging in Python team.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}
\usepackage{amssymb}
% Uncomment these two if needed
%\usepackage{amsfonts}
%\usepackage{txfonts}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
| bsd-3-clause |
toobaz/pandas | doc/source/conf.py | 1 | 23559 | #
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import inspect
import importlib
import logging
import jinja2
from sphinx.ext.autosummary import _import_by_name
from numpydoc.docscrape import NumpyDocString
logger = logging.getLogger(__name__)
# https://github.com/sphinx-doc/sphinx/pull/2325/files
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000.
sys.setrecursionlimit(5000)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("../sphinxext"))
sys.path.extend(
[
# numpy standard doc extensions
os.path.join(os.path.dirname(__file__), "..", "../..", "sphinxext")
]
)
# -- General configuration -----------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# sphinxext.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.todo",
"numpydoc", # handle NumPy documentation formatted docstrings
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.linkcode",
"nbsphinx",
"contributors", # custom pandas extension
]
exclude_patterns = ["**.ipynb_checkpoints"]
try:
import nbconvert
except ImportError:
logger.warn("nbconvert not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
logger.warn("Pandoc not installed. Skipping notebooks.")
exclude_patterns.append("**/*.ipynb")
# sphinx_pattern can be '-api' to exclude the API pages,
# the path to a file, or a Python object
# (e.g. '10min.rst' or 'pandas.DataFrame.head')
source_path = os.path.dirname(os.path.abspath(__file__))
pattern = os.environ.get("SPHINX_PATTERN")
if pattern:
for dirname, dirs, fnames in os.walk(source_path):
for fname in fnames:
if os.path.splitext(fname)[-1] in (".rst", ".ipynb"):
fname = os.path.relpath(os.path.join(dirname, fname), source_path)
if fname == "index.rst" and os.path.abspath(dirname) == source_path:
continue
elif pattern == "-api" and dirname == "reference":
exclude_patterns.append(fname)
elif pattern != "-api" and fname != pattern:
exclude_patterns.append(fname)
with open(os.path.join(source_path, "index.rst.template")) as f:
t = jinja2.Template(f.read())
with open(os.path.join(source_path, "index.rst"), "w") as f:
f.write(
t.render(
include_api=pattern is None,
single_doc=(pattern if pattern is not None and pattern != "-api" else None),
)
)
autosummary_generate = True if pattern is None else ["index"]
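# Informal usage note (SPHINX_PATTERN is normally set on your behalf by the
# doc build script rather than by hand); the effect of the loop above is:
#     SPHINX_PATTERN="-api"                  -> skip the API reference pages
#     SPHINX_PATTERN="10min.rst"             -> build only that page (+ index)
#     SPHINX_PATTERN="pandas.DataFrame.head" -> build a single-object page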
# numpydoc
numpydoc_attributes_as_param_list = False
# matplotlib plot directive
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
plot_pre_code = """import numpy as np
import pandas as pd"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ["../_templates"]
# The suffix of source filenames.
source_suffix = [".rst"]
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "pandas"
copyright = "2008-2014, the pandas development team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas
# version = '%s r%s' % (pandas.__version__, svn_version())
version = str(pandas.__version__)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "nature_with_gtoc"
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = os.path.join(html_static_path[0], "favicon.ico")
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# Add redirect for previously existing API pages
# each item is like `(from_old, to_new)`
# To redirect a class and all its methods, see below
# https://github.com/pandas-dev/pandas/issues/16186
moved_api_pages = [
("pandas.core.common.isnull", "pandas.isna"),
("pandas.core.common.notnull", "pandas.notna"),
("pandas.core.reshape.get_dummies", "pandas.get_dummies"),
("pandas.tools.merge.concat", "pandas.concat"),
("pandas.tools.merge.merge", "pandas.merge"),
("pandas.tools.pivot.pivot_table", "pandas.pivot_table"),
("pandas.tseries.tools.to_datetime", "pandas.to_datetime"),
("pandas.io.clipboard.read_clipboard", "pandas.read_clipboard"),
("pandas.io.excel.ExcelFile.parse", "pandas.ExcelFile.parse"),
("pandas.io.excel.read_excel", "pandas.read_excel"),
("pandas.io.gbq.read_gbq", "pandas.read_gbq"),
("pandas.io.html.read_html", "pandas.read_html"),
("pandas.io.json.read_json", "pandas.read_json"),
("pandas.io.parsers.read_csv", "pandas.read_csv"),
("pandas.io.parsers.read_fwf", "pandas.read_fwf"),
("pandas.io.parsers.read_table", "pandas.read_table"),
("pandas.io.pickle.read_pickle", "pandas.read_pickle"),
("pandas.io.pytables.HDFStore.append", "pandas.HDFStore.append"),
("pandas.io.pytables.HDFStore.get", "pandas.HDFStore.get"),
("pandas.io.pytables.HDFStore.put", "pandas.HDFStore.put"),
("pandas.io.pytables.HDFStore.select", "pandas.HDFStore.select"),
("pandas.io.pytables.read_hdf", "pandas.read_hdf"),
("pandas.io.sql.read_sql", "pandas.read_sql"),
("pandas.io.sql.read_frame", "pandas.read_frame"),
("pandas.io.sql.write_frame", "pandas.write_frame"),
("pandas.io.stata.read_stata", "pandas.read_stata"),
]
# Again, tuples of (from_old, to_new)
moved_classes = [
("pandas.tseries.resample.Resampler", "pandas.core.resample.Resampler"),
("pandas.formats.style.Styler", "pandas.io.formats.style.Styler"),
]
for old, new in moved_classes:
# the class itself...
moved_api_pages.append((old, new))
mod, classname = new.rsplit(".", 1)
klass = getattr(importlib.import_module(mod), classname)
methods = [
x for x in dir(klass) if not x.startswith("_") or x in ("__iter__", "__array__")
]
for method in methods:
# ... and each of its public methods
moved_api_pages.append(
(
"{old}.{method}".format(old=old, method=method),
"{new}.{method}".format(new=new, method=method),
)
)
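# e.g. for ("pandas.formats.style.Styler", "pandas.io.formats.style.Styler")
# the loop above appends the class redirect itself plus one redirect per
# public method, such as ("pandas.formats.style.Styler.render",
# "pandas.io.formats.style.Styler.render").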
if pattern is None:
html_additional_pages = {
"generated/" + page[0]: "api_redirect.html" for page in moved_api_pages
}
header = """\
.. currentmodule:: pandas
.. ipython:: python
:suppress:
import numpy as np
import pandas as pd
randn = np.random.randn
np.random.seed(123456)
np.set_printoptions(precision=4, suppress=True)
pd.options.display.max_rows = 15
import os
os.chdir(r'{}')
""".format(
os.path.dirname(os.path.dirname(__file__))
)
html_context = {
"redirects": {old: new for old, new in moved_api_pages},
"header": header,
}
# If false, no module index is generated.
html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "pandas"
# -- Options for nbsphinx ------------------------------------------------
nbsphinx_allow_errors = True
# -- Options for LaTeX output --------------------------------------------
latex_elements = {}
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"index",
"pandas.tex",
"pandas: powerful Python data analysis toolkit",
r"Wes McKinney\n\& PyData Development Team",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
if pattern is None:
intersphinx_mapping = {
"dateutil": ("https://dateutil.readthedocs.io/en/latest/", None),
"matplotlib": ("https://matplotlib.org/", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"pandas-gbq": ("https://pandas-gbq.readthedocs.io/en/latest/", None),
"py": ("https://pylib.readthedocs.io/en/latest/", None),
"python": ("https://docs.python.org/3/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"statsmodels": ("http://www.statsmodels.org/devel/", None),
}
# extlinks alias
extlinks = {
"issue": ("https://github.com/pandas-dev/pandas/issues/%s", "GH"),
"wiki": ("https://github.com/pandas-dev/pandas/wiki/%s", "wiki "),
}
ipython_warning_is_error = False
ipython_exec_lines = [
"import numpy as np",
"import pandas as pd",
# This ensures correct rendering on system with console encoding != utf8
# (windows). It forces pandas to encode its output reprs using utf8
# wherever the docs are built. The docs' target is the browser, not
# the console, so this is fine.
'pd.options.display.encoding="utf8"',
]
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
import sphinx
from sphinx.util import rpartition
from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter
from sphinx.ext.autosummary import Autosummary
class AccessorDocumenter(MethodDocumenter):
"""
Specialized Documenter subclass for accessors.
"""
objtype = "accessor"
directivetype = "method"
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
def format_signature(self):
# this method gives an error/warning for the accessors, therefore
# overriding it (accessor has no arguments)
return ""
class AccessorLevelDocumenter(Documenter):
"""
Specialized Documenter subclass for objects on accessor level (methods,
attributes).
"""
# This is the simple straightforward version
# modname is None, base the last elements (eg 'hour')
# and path the part before (eg 'Series.dt')
# def resolve_name(self, modname, parents, path, base):
# modname = 'pandas'
# mod_cls = path.rstrip('.')
# mod_cls = mod_cls.split('.')
#
# return modname, mod_cls + [base]
def resolve_name(self, modname, parents, path, base):
if modname is None:
if path:
mod_cls = path.rstrip(".")
else:
mod_cls = None
# if documenting a class-level object without path,
# there must be a current class, either from a parent
# auto directive ...
mod_cls = self.env.temp_data.get("autodoc:class")
# ... or from a class directive
if mod_cls is None:
mod_cls = self.env.temp_data.get("py:class")
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
# HACK: this is added in comparison to ClassLevelDocumenter
# mod_cls still exists of class.accessor, so an extra
# rpartition is needed
modname, accessor = rpartition(mod_cls, ".")
modname, cls = rpartition(modname, ".")
parents = [cls, accessor]
# if the module name is still missing, get it like above
if not modname:
modname = self.env.temp_data.get("autodoc:module")
if not modname:
if sphinx.__version__ > "1.3":
modname = self.env.ref_context.get("py:module")
else:
modname = self.env.temp_data.get("py:module")
# ... else, it stays None, which means invalid
return modname, parents + [base]
class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter):
objtype = "accessorattribute"
directivetype = "attribute"
# lower than AttributeDocumenter so this is not chosen for normal
# attributes
priority = 0.6
class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter):
objtype = "accessormethod"
directivetype = "method"
# lower than MethodDocumenter so this is not chosen for normal methods
priority = 0.6
class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter):
"""
    This documenter lets us remove .__call__ from the method signature for
callable accessors like Series.plot
"""
objtype = "accessorcallable"
directivetype = "method"
# lower than MethodDocumenter; otherwise the doc build prints warnings
priority = 0.5
def format_name(self):
return MethodDocumenter.format_name(self).rstrip(".__call__")
class PandasAutosummary(Autosummary):
"""
This alternative autosummary class lets us override the table summary for
Series.plot and DataFrame.plot in the API docs.
"""
def _replace_pandas_items(self, display_name, sig, summary, real_name):
        # this is a hack: ideally we should extract the signature from the
# .__call__ method instead of hard coding this
if display_name == "DataFrame.plot":
sig = "([x, y, kind, ax, ....])"
summary = "DataFrame plotting accessor and method"
elif display_name == "Series.plot":
sig = "([kind, ax, figsize, ....])"
summary = "Series plotting accessor and method"
return (display_name, sig, summary, real_name)
@staticmethod
def _is_deprecated(real_name):
try:
obj, parent, modname = _import_by_name(real_name)
except ImportError:
return False
doc = NumpyDocString(obj.__doc__ or "")
summary = "".join(doc["Summary"] + doc["Extended Summary"])
return ".. deprecated::" in summary
def _add_deprecation_prefixes(self, items):
for item in items:
display_name, sig, summary, real_name = item
if self._is_deprecated(real_name):
summary = "(DEPRECATED) %s" % summary
yield display_name, sig, summary, real_name
def get_items(self, names):
items = Autosummary.get_items(self, names)
items = [self._replace_pandas_items(*item) for item in items]
items = list(self._add_deprecation_prefixes(items))
return items
# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != "py":
return None
modname = info["module"]
fullname = info["fullname"]
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split("."):
try:
obj = getattr(obj, part)
except AttributeError:
return None
try:
# inspect.unwrap() was added in Python version 3.4
if sys.version_info >= (3, 5):
fn = inspect.getsourcefile(inspect.unwrap(obj))
else:
fn = inspect.getsourcefile(obj)
except TypeError:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
lineno = None
if lineno:
linespec = "#L{:d}-L{:d}".format(lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))
if "+" in pandas.__version__:
return "http://github.com/pandas-dev/pandas/blob/master/pandas/" "{}{}".format(
fn, linespec
)
else:
return "http://github.com/pandas-dev/pandas/blob/" "v{}/pandas/{}{}".format(
pandas.__version__, fn, linespec
)
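# Informal example (exact URL depends on the installed pandas version): for
# info = {'module': 'pandas', 'fullname': 'DataFrame.head'} the resolver
# returns a link of the form
#     http://github.com/pandas-dev/pandas/blob/v<version>/pandas/<source file>#L<start>-L<end>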
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
if what == "attribute" and name.endswith(".flags"):
del lines[:]
def process_class_docstrings(app, what, name, obj, options, lines):
"""
For those classes for which we use ::
:template: autosummary/class_without_autosummary.rst
the documented attributes/methods have to be listed in the class
docstring. However, if one of those lists is empty, we use 'None',
which then generates warnings in sphinx / ugly html output.
This "autodoc-process-docstring" event connector removes that part
from the processed docstring.
"""
if what == "class":
joined = "\n".join(lines)
templates = [
""".. rubric:: Attributes
.. autosummary::
:toctree:
None
""",
""".. rubric:: Methods
.. autosummary::
:toctree:
None
""",
]
for template in templates:
if template in joined:
joined = joined.replace(template, "")
lines[:] = joined.split("\n")
suppress_warnings = [
# We "overwrite" autosummary with our PandasAutosummary, but
# still want the regular autosummary setup to run. So we just
# suppress this warning.
"app.add_directive"
]
if pattern:
# When building a single document we don't want to warn because references
# to other documents are unknown, as it's expected
suppress_warnings.append("ref.ref")
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# http://ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
# Make sure we're outputting HTML
if app.builder.format != "html":
return
src = source[0]
rendered = app.builder.templates.render_string(src, app.config.html_context)
source[0] = rendered
def setup(app):
app.connect("source-read", rstjinja)
app.connect("autodoc-process-docstring", remove_flags_docstring)
app.connect("autodoc-process-docstring", process_class_docstrings)
app.add_autodocumenter(AccessorDocumenter)
app.add_autodocumenter(AccessorAttributeDocumenter)
app.add_autodocumenter(AccessorMethodDocumenter)
app.add_autodocumenter(AccessorCallableDocumenter)
app.add_directive("autosummary", PandasAutosummary)
| bsd-3-clause |
aewhatley/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
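# Worked example of the expansion ParameterGrid performs (illustrative):
#     list(ParameterGrid({'kernel': ['rbf'], 'C': [1, 10]}))
#     -> [{'C': 1, 'kernel': 'rbf'}, {'C': 10, 'kernel': 'rbf'}]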
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
    # Test that grid search captures errors when X and y have
    # mismatched lengths
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
| bsd-3-clause |
nanophotonics/nplab | nplab/analysis/__init__.py | 1 | 9190 | __author__ = 'alansanders'
import numpy as np
from pathlib import Path
import h5py
from scipy.ndimage import gaussian_filter
from functools import cached_property
def load_h5(location='.'):
'''return the latest h5 in a given directory. If location is left blank,
loads the latest file in the current directory.'''
path = Path(location)
    # note: path.iterdir() already yields paths inside `location`
    return h5py.File(max(f for f in path.iterdir() if f.suffix == '.h5'), 'r')
def latest_scan(file):
'''returns the last ParticleScannerScan in a file'''
return file[max(file, key=lambda x: int(x.split('_')[-1])
if x.startswith('ParticleScannerScan') else -1)]
class Spectrum(np.ndarray):
    '''acts as an ndarray, but has a wavelengths attribute,
    and several useful methods for spectra. Can be 1- or 2-d (time series/z-scan)'''
def __new__(cls, spectrum, wavelengths, *args, **kwargs):
'''boilerplate numpy subclassing'''
assert len(wavelengths) == np.shape(spectrum)[-1]
obj = np.asarray(spectrum).view(cls)
obj.wavelengths = np.asarray(wavelengths)
return obj
def __array_finalize__(self, obj):
'''boilerplate numpy subclassing'''
if obj is None:
return
if not obj.shape:
return np.array(obj)
self.wavelengths = getattr(
obj, 'wavelengths', np.arange(obj.shape[-1]))
@classmethod
def from_h5(cls, dataset):
'''create instance using a h5 dataset.
will background-subtract and reference the spectrum if these
attributes are saved'''
attrs = dataset.attrs
ref = attrs.get('reference', 1)
bg = attrs.get('background', 0)
return cls((dataset[()]-bg)/(ref-bg), dataset.attrs['wavelengths'])
@property
def wl(self):
'''convenient for accessing wavelengths'''
return self.wavelengths
@wl.setter
def wl(self, value):
'''convenient for accessing wavelengths'''
self.wavelengths = np.array(value)
@property
def x(self):
'''abstraction of x axis for using shifts or wavelengths'''
return self.wavelengths # wavelengths unless subclassed
def split(self, lower=-np.inf, upper=np.inf):
'''returns the spectrum between the upper and lower bounds'''
if upper < lower:
upper, lower = lower, upper
condition = (lower <= self.x) & (self.x < upper)
# '<=' allows recombination of an array into the original
return self.__class__(self.T[condition].T, self.x[condition])
def norm(self):
        '''return the spectrum divided by its largest value'''
return self.__class__(self/self.ravel().max(), self.x)
def squash(self):
'''condense a time_series into one spectrum'''
return self.__class__(self.sum(axis=0), self.x)
def smooth(self, sigma):
        '''smooth using scipy.ndimage.gaussian_filter'''
return self.__class__(gaussian_filter(self, sigma), self.x)
def remove_cosmic_ray(self,
thresh=5,
smooth=30,
max_iterations=10):
'''wrapper around remove_cosmic_ray to allow 2d or 1d spectra
to be passed'''
func = lambda s: remove_cosmic_ray(s,
thresh=thresh,
smooth=smooth,
max_iterations=max_iterations)
if len(self.shape) == 2:
return self.__class__([func(s) for s in self],
self.x,)
return self.__class__(func(self), self.x)
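# Illustrative use of Spectrum (hypothetical numbers, not part of the module):
# wrap counts together with their wavelength axis and slice by wavelength
# rather than by index, e.g.
#     s = Spectrum(np.random.rand(1024), np.linspace(400., 900., 1024))
#     visible = s.split(450., 750.)    # keeps 450 nm <= wavelength < 750 nm
#     cleaned = visible.norm().smooth(5)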
class RamanSpectrum(Spectrum):
'''
Uses shifts as its x axis. These are the values used in split() etc.
When creating, either supply shifts directly, or they'll be calculated
the first time they're accessed using wavelengths and laser_wavelength.
To use with a different laser wavelength, change the class attribute
after importing:
>>> RamanSpectrum.laser_wavelength = 785.
if you frequently use two wavelengths in the same analysis, create a
subclass:
>>> class RamanSpectrum785(RamanSpectrum):
laser_wavelength = 785.
class RamanSpectrum532(RamanSpectrum):
laser_wavelength = 532.
'''
laser_wavelength = 632.8
def __new__(cls,
spectrum,
shifts=None,
wavelengths=None,
*args, **kwargs):
assert not (shifts is None and wavelengths is None),\
'must supply shifts or wavelengths'
obj = np.asarray(spectrum).view(cls)
if wavelengths is not None:
wavelengths = np.asarray(wavelengths)
obj.wavelengths = wavelengths
if shifts is not None:
shifts = np.asarray(shifts)
obj._shifts = shifts
obj.laser_wavelength = cls.laser_wavelength
# stops existing instances' laser_wavelength being changed by changing
# the class attribute
return obj
def __array_finalize__(self, obj):
if obj is None:
return
if not obj.shape:
return np.array(obj)
self.wavelengths = getattr(obj,
'wavelengths',
np.arange(obj.shape[-1]))
self._shifts = getattr(obj, '_shifts', None)
@classmethod
def from_h5(cls, dataset):
'''create instance using a h5 dataset.
will background-subtract and reference the spectrum if these
attributes are saved'''
attrs = dataset.attrs
ref = attrs.get('reference', 1)
bg = attrs.get('background', 0)
return cls((dataset[()]-bg)/(ref-bg), wavelengths=dataset.attrs['wavelengths'])
@cached_property # only ever calculated once per instance
def shifts(self):
if self._shifts is None:
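            # Raman shift in cm^-1: 1/lambda_laser - 1/lambda_scattered, with
            # the 1e-9 factors converting nm to m and the final /100
            # converting m^-1 to cm^-1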
return (1./(self.laser_wavelength*1e-9) - 1./(self.wl*1e-9))/100.
return self._shifts
@property
def x(self):
return self.shifts
def remove_cosmic_ray(spectrum, thresh=5, smooth=30, max_iterations=10):
'''
a way of removing cosmic rays from spectra. Mainly tested with Dark-Field
spectra, as the spikiness of Raman makes it very difficult to do simply.
thresh: the height above the noise level a given data point should be
to be considered a cosmic ray. Lower values will remove smaller cosmic rays,
but may start to remove higher parts of the noise if too low.
smooth: the 'sigma' value used to smooth the spectrum,
        see scipy.ndimage.gaussian_filter. Should be high enough that the
        cosmic ray is almost gone in the smoothed spectrum, while the overall
        shape of the spectrum is still conserved.
max_iterations:
maximum iterations. Shouldn't matter how high it is as most spectra
are done in 1-3.
'''
_len = len(spectrum)
cleaned = np.copy(spectrum) # prevent modification in place
for i in range(max_iterations):
noise_spectrum = cleaned/gaussian_filter(cleaned, smooth)
# ^ should be a flat, noisy line, with a large spike where there's
# a cosmic ray.
noise_level = np.sqrt(np.var(noise_spectrum))
# average deviation of a datapoint from the mean
mean_noise = noise_spectrum.mean() # should be == 1
spikes = np.arange(_len)[noise_spectrum > mean_noise+(thresh*noise_level)]
# the indices of the datapoints that are above the threshold
# now we add all data points to either side of the spike that are
# above the noise level (but not necessarily the thresh*noise_level)
rays = set()
for spike in spikes:
for side in (-1, 1): # left and right
step = 0
while 0 <= (coord := spike+(side*step)) <= _len-1:
# staying in the spectrum
if noise_spectrum[coord] > mean_noise + noise_level:
rays.add(coord)
step += 1
else:
break
rays = list(rays) # convert to list for indexing
if rays: # if there are any cosmic rays
cleaned[rays] = gaussian_filter(cleaned, smooth)[rays]
            # replace these regions with the smoothed spectrum
continue # and repeat, as the smoothed spectrum will still be
# quite affected by the cosmic ray.
# until no cosmic rays are found
return cleaned
return cleaned
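# Quick illustrative check (hypothetical values): a flat spectrum with a single
# spiked pixel should come back essentially flat, e.g.
#     spec = np.full(200, 500.)
#     spec[100] = 1e5                      # simulated cosmic ray
#     clean = remove_cosmic_ray(spec, thresh=5, smooth=30)
# after which clean[100] ends up near the 500-count baseline rather than 1e5.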
if __name__ == '__main__':
import matplotlib.pyplot as plt
wls = np.linspace(633, 750, 1600)
spec = np.random.randint(300, 600, size=1600)
rspec = RamanSpectrum(spec, wavelengths=wls)
RamanSpectrum.laser_wavelength = 700
plt.figure()
plt.plot(rspec.shifts, rspec, label='shifts')
rspec2 = RamanSpectrum(spec, wavelengths=wls)
plt.plot(rspec2.shifts, rspec2, label='center of 700')
plt.legend() | gpl-3.0 |
dinos66/termAnalysis | forTateDataset/quiverTest.py | 1 | 2810 |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import interactive
from scipy.spatial import distance
from matplotlib.pyplot import cm
import matplotlib.colors as colors
n_columns, n_rows = 200, 120
X1 = [43, 51, 31, 5, 66, 22, 194, 66, 20, 45]
Y1 = [76, 54, 35, 3, 69, 16, 100, 46, 53, 101]
X2 = [19, 46, 48, 36, 65, 88, 27, 150, 59, 8]
Y2 = [46, 63, 35, 83, 61, 47, 107, 69, 77, 30]
##import random
##fig, ax = plt.subplots()
##plt.scatter(X1,Y1)
##labels = [str(x) for x in range(len(X1[:10]))]
##for label, x, y in zip(labels, X1, Y1):
## plt.annotate(label, xy = (x, y), xytext = (-20*random.random(), 20*random.random()), textcoords = 'offset points', ha = 'right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
## arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
##plt.xlim(0,n_columns)
##plt.ylim(0,n_rows)
##ax.invert_yaxis()
##plt.title('test 1')
##mng = plt.get_current_fig_manager()
##mng.window.state('zoomed')
##interactive(True)
##plt.show()
##fig.savefig('./test1.png',bbox_inches='tight')
##plt.close()
##interactive(False)
##
##fig, ax = plt.subplots()
##plt.scatter(X2,Y2)
##labels = [str(x) for x in range(len(X2[:10]))]
##for label, x, y in zip(labels, X2, Y2):
## plt.annotate(label, xy = (x, y), xytext = (-20*random.random(), 20*random.random()), textcoords = 'offset points', ha = 'right', va = 'bottom',bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
## arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
##plt.xlim(0,n_columns)
##plt.ylim(0,n_rows)
##ax.invert_yaxis()
##plt.title('test 2')
##mng = plt.get_current_fig_manager()
##mng.window.state('zoomed')
##interactive(True)
##plt.show()
##fig.savefig('./test2.png',bbox_inches='tight')
##plt.close()
##interactive(False)
# the vectors are 3-d, so use mplot3d's quiver (pyplot.quiver is 2-d only)
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
a = np.array([2, 3, 5])
b = np.array([1, 1, 0])
c = a + b
starts = np.zeros((3, 3))
ends = np.array([a, b, c])
Q = ax.quiver(starts[:, 0], starts[:, 1], starts[:, 2],
              ends[:, 0], ends[:, 1], ends[:, 2])
ax.set_xlim([0, 3])
ax.set_ylim([0, 4])
ax.set_zlim([0, 5])
plt.show()
##fig, ax = plt.subplots()
##X1,Y1 = np.mgrid[-n_columns/2:n_columns/2:1j,-n_columns/2:n_columns/2:1j]#np.array(X1),np.array(Y1))
##Q = plt.streamplot(X1,Y1,np.array(X2),np.array(Y2))
##plt.xlim(0,n_columns)
##plt.ylim(0,n_rows)
### ax.set_xticks(range(0, len(nodes)))#, minor=False)
### ax.xaxis.tick_top()
### ax.set_xticklabels(nodes, minor=False, fontsize = 7, rotation = 90)
### ax.set_yticklabels(list(reversed(nodes)), minor=False, fontsize = 7)
##ax.invert_yaxis()
##plt.title('quiver plot')
##mng = plt.get_current_fig_manager()
##mng.window.state('zoomed')
##interactive(True)
##plt.show()
##fig.savefig('./quivertest.png',bbox_inches='tight')
##plt.close()
##interactive(False)
| apache-2.0 |
marcocaccin/scikit-learn | sklearn/linear_model/setup.py | 146 | 1713 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.c'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.c'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
sabyasachi087/sp17-i524 | project/S17-IO-3012/code/bin/benchmark_replicas_find.py | 19 | 5441 | import matplotlib.pyplot as plt
import sys
import pandas as pd
def get_parm():
"""retrieves mandatory parameter to program
@param: none
@type: n/a
"""
try:
return sys.argv[1]
except:
print ('Must enter file name as parameter')
exit()
def read_file(filename):
"""reads a file into a pandas dataframe
@param: filename The name of the file to read
@type: string
"""
try:
return pd.read_csv(filename)
except:
print ('Error retrieving file')
exit()
def select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica):
benchmark_df = benchmark_df[benchmark_df.mongo_version == 34]
benchmark_df = benchmark_df[benchmark_df.test_size == "large"]
if cloud != 'X':
benchmark_df = benchmark_df[benchmark_df.cloud == cloud]
if config_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.config_replicas == config_replicas]
if mongos_instances != 'X':
benchmark_df = benchmark_df[benchmark_df.mongos_instances == mongos_instances]
if shard_replicas != 'X':
benchmark_df = benchmark_df[benchmark_df.shard_replicas == shard_replicas]
if shards_per_replica != 'X':
benchmark_df = benchmark_df[benchmark_df.shards_per_replica == shards_per_replica]
# benchmark_df1 = benchmark_df.groupby(['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica']).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
benchmark_df = benchmark_df.groupby(
['cloud', 'config_replicas', 'mongos_instances', 'shard_replicas', 'shards_per_replica'], as_index=False).mean()
# http://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-object-to-dataframe
# print benchmark_df1['shard_replicas']
# print benchmark_df1
# print benchmark_df
benchmark_df = benchmark_df.sort_values(by='shard_replicas', ascending=1)
return benchmark_df
def make_figure(find_seconds_kilo, replicas_kilo, find_seconds_chameleon, replicas_chameleon, find_seconds_jetstream, replicas_jetstream):
"""formats and creates a line chart
@param1: find_seconds_kilo Array with find_seconds from kilo
@type: numpy array
@param2: replicas_kilo Array with replicas from kilo
@type: numpy array
@param3: find_seconds_chameleon Array with find_seconds from chameleon
@type: numpy array
    @param4: replicas_chameleon Array with replicas from chameleon
    @type: numpy array
    @param5: find_seconds_jetstream Array with find_seconds from jetstream
    @type: numpy array
    @param6: replicas_jetstream Array with replicas from jetstream
    @type: numpy array
    """
fig = plt.figure()
#plt.title('Average Find Command Runtime by Shard Replication Factor')
plt.ylabel('Runtime in Seconds')
plt.xlabel('Degree of Replication Per Set')
# Make the chart
plt.plot(replicas_kilo, find_seconds_kilo, label='Kilo Cloud')
plt.plot(replicas_chameleon, find_seconds_chameleon, label='Chameleon Cloud')
plt.plot(replicas_jetstream, find_seconds_jetstream, label='Jetstream Cloud')
# http://stackoverflow.com/questions/11744990/how-to-set-auto-for-upper-limit-but-keep-a-fixed-lower-limit-with-matplotlib
plt.ylim(ymin=0)
plt.legend(loc='best')
# Show the chart (for testing)
# plt.show()
# Save the chart
fig.savefig('../report/replica_find.png')
# Run the program by calling the functions
if __name__ == "__main__":
filename = get_parm()
benchmark_df = read_file(filename)
cloud = 'kilo'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_kilo = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_kilo = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'chameleon'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_chameleon = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_chameleon = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
cloud = 'jetstream'
config_replicas = 1
mongos_instances = 1
shard_replicas = 1
shards_per_replica = 'X'
select_df = select_data(benchmark_df, cloud, config_replicas, mongos_instances, shard_replicas, shards_per_replica)
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
# percentage death=\
find_seconds_jetstream = select_df.as_matrix(columns=[select_df.columns[7]])
replicas_jetstream = select_df.as_matrix(columns=[select_df.columns[4]])
# http://stackoverflow.com/questions/31791476/pandas-dataframe-to-numpy-array-valueerror
make_figure(find_seconds_kilo, replicas_kilo, find_seconds_chameleon, replicas_chameleon, find_seconds_jetstream, replicas_jetstream)
| apache-2.0 |
adammenges/statsmodels | statsmodels/datasets/strikes/data.py | 25 | 1951 | #! /usr/bin/env python
"""U.S. Strike Duration Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain."""
TITLE = __doc__
SOURCE = """
This is a subset of the data used in Kennan (1985). It was originally
published by the Bureau of Labor Statistics.
::
Kennan, J. 1985. "The duration of contract strikes in US manufacturing.
`Journal of Econometrics` 28.1, 5-28.
"""
DESCRSHORT = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production."""
DESCRLONG = """Contains data on the length of strikes in US manufacturing and
unanticipated industrial production. The data is a subset of the data originally
used by Kennan. The data here is data for the months of June only to avoid
seasonal issues."""
#suggested notes
NOTE = """::
Number of observations - 62
Number of variables - 2
Variable name definitions::
duration - duration of the strike in days
iprod - unanticipated industrial production
"""
from numpy import recfromtxt, column_stack, array
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
"""
Load the strikes data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/strikes.csv', 'rb'), delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
marmarko/ml101 | tensorflow/examples/skflow/text_classification_builtin_rnn_model.py | 11 | 2984 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def input_op_fn(x):
"""Customized function to transform batched x into embeddings."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unpack(word_vectors, axis=1)
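  # For example (hypothetical batch size): with a batch of 32 documents, x is
  # a [32, MAX_DOCUMENT_LENGTH] tensor of word ids, word_vectors is
  # [32, MAX_DOCUMENT_LENGTH, EMBEDDING_SIZE], and word_list is a Python list
  # of MAX_DOCUMENT_LENGTH tensors, each of shape [32, EMBEDDING_SIZE].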
return word_list
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model: a single direction GRU with a single layer
classifier = learn.TensorFlowRNNClassifier(
rnn_size=EMBEDDING_SIZE, n_classes=15, cell_type='gru',
input_op_fn=input_op_fn, num_layers=1, bidirectional=False,
sequence_length=None, steps=1000, optimizer='Adam',
learning_rate=0.01, continue_training=True)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| bsd-2-clause |
loli/sklearn-ensembletrees | sklearn/tests/test_hmm.py | 31 | 28118 | from __future__ import print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
rng = np.random.RandomState(0)
np.seterr(all='warn')
class TestBaseHMM(TestCase):
def setUp(self):
self.prng = np.random.RandomState(9)
class StubHMM(hmm._BaseHMM):
def _compute_log_likelihood(self, X):
return self.framelogprob
def _generate_sample_from_state(self):
pass
def _init(self):
pass
def setup_example_hmm(self):
# Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
h = self.StubHMM(2)
h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
h.startprob_ = [0.5, 0.5]
framelogprob = np.log([[0.9, 0.2],
[0.9, 0.2],
[0.1, 0.8],
[0.9, 0.2],
[0.9, 0.2]])
# Add dummy observations to stub.
h.framelogprob = framelogprob
return h, framelogprob
def test_init(self):
h, framelogprob = self.setup_example_hmm()
for params in [('transmat_',), ('startprob_', 'transmat_')]:
d = dict((x[:-1], getattr(h, x)) for x in params)
h2 = self.StubHMM(h.n_components, **d)
self.assertEqual(h.n_components, h2.n_components)
for p in params:
assert_array_almost_equal(getattr(h, p), getattr(h2, p))
def test_set_startprob(self):
h, framelogprob = self.setup_example_hmm()
startprob = np.array([0.0, 1.0])
h.startprob_ = startprob
assert np.allclose(startprob, h.startprob_)
def test_set_transmat(self):
h, framelogprob = self.setup_example_hmm()
transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
h.transmat_ = transmat
assert np.allclose(transmat, h.transmat_)
def test_do_forward_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, fwdlattice = h._do_forward_pass(framelogprob)
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
reffwdlattice = np.array([[0.4500, 0.1000],
[0.3105, 0.0410],
[0.0230, 0.0975],
[0.0408, 0.0150],
[0.0298, 0.0046]])
assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
def test_do_backward_pass(self):
h, framelogprob = self.setup_example_hmm()
bwdlattice = h._do_backward_pass(framelogprob)
refbwdlattice = np.array([[0.0661, 0.0455],
[0.0906, 0.1503],
[0.4593, 0.2437],
[0.6900, 0.4100],
[1.0000, 1.0000]])
assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
def test_do_viterbi_pass(self):
h, framelogprob = self.setup_example_hmm()
logprob, state_sequence = h._do_viterbi_pass(framelogprob)
refstate_sequence = [0, 0, 1, 0, 0]
assert_array_equal(state_sequence, refstate_sequence)
reflogprob = -4.4590
self.assertAlmostEqual(logprob, reflogprob, places=4)
def test_score_samples(self):
h, framelogprob = self.setup_example_hmm()
nobs = len(framelogprob)
logprob, posteriors = h.score_samples([])
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
reflogprob = -3.3725
self.assertAlmostEqual(logprob, reflogprob, places=4)
refposteriors = np.array([[0.8673, 0.1327],
[0.8204, 0.1796],
[0.3075, 0.6925],
[0.8204, 0.1796],
[0.8673, 0.1327]])
assert_array_almost_equal(posteriors, refposteriors, decimal=4)
def test_hmm_score_samples_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
logprob, hmmposteriors = h.score_samples([])
assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
assert_array_almost_equal(hmmposteriors, gmmposteriors)
def test_hmm_decode_consistent_with_gmm(self):
n_components = 8
nobs = 10
h = self.StubHMM(n_components)
# Add dummy observations to stub.
framelogprob = np.log(self.prng.rand(nobs, n_components))
h.framelogprob = framelogprob
# If startprob and transmat are uniform across all states (the
# default), the transitions are uninformative - the model
# reduces to a GMM with uniform mixing weights (in terms of
# posteriors, not likelihoods).
viterbi_ll, state_sequence = h.decode([])
norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
gmmstate_sequence = gmmposteriors.argmax(axis=1)
assert_array_equal(state_sequence, gmmstate_sequence)
def test_base_hmm_attributes(self):
n_components = 20
startprob = self.prng.rand(n_components)
startprob = startprob / startprob.sum()
transmat = self.prng.rand(n_components, n_components)
transmat /= np.tile(transmat.sum(axis=1)
[:, np.newaxis], (1, n_components))
h = self.StubHMM(n_components)
self.assertEqual(h.n_components, n_components)
h.startprob_ = startprob
assert_array_almost_equal(h.startprob_, startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((n_components - 2, 2)))
h.transmat_ = transmat
assert_array_almost_equal(h.transmat_, transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
hmm.n_iter = 1
hmm.fit(obs)
loglikelihoods = []
for n in range(n_iter):
hmm.n_iter = 1
hmm.init_params = ''
hmm.fit(obs)
loglikelihoods.append(sum(hmm.score(x) for x in obs))
return loglikelihoods
class GaussianHMMBaseTester(object):
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
hmm.GaussianHMM(20, self.covariance_type)
self.assertRaises(ValueError, hmm.GaussianHMM, 20,
'badcovariance_type')
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
nobs = len(gaussidx)
obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
h.startprob_ = self.startprob
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
delta_min = np.diff(trainll).min()
self.assertTrue(
delta_min > -0.8,
"The min nll increase is %f which is lower than the admissible"
" threshold of %f, for model %s. The likelihoods are %s."
% (delta_min, -0.8, self.covariance_type, trainll))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
def test_fit_with_length_one_signal(self):
obs = [self.prng.rand(10, self.n_features),
self.prng.rand(8, self.n_features),
self.prng.rand(1, self.n_features)]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which has no identity
h.fit(obs)
def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs[:1])
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test MAP train: %s (%s)\n %s\n %s'
% (self.covariance_type, params, trainll, np.diff(trainll)))
# XXX: Why such a large tolerance?
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_non_ergodic_transmat(self):
startprob = np.array([1, 0, 0, 0, 0])
transmat = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h = hmm.GaussianHMM(n_components=5,
covariance_type='full', startprob=startprob,
transmat=transmat, n_iter=100, init_params='st')
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
obs = [h.sample(10)[0] for _ in range(10)]
h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_symbols = 3 # ('walk', 'shop', 'clean')
self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
self.startprob = [0.6, 0.4]
self.transmat = [[0.7, 0.3], [0.4, 0.6]]
self.h = hmm.MultinomialHMM(self.n_components,
startprob=self.startprob,
transmat=self.transmat)
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
observations = [0, 1, 2]
logprob, state_sequence = self.h.decode(observations)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
observations = [0, 1, 2]
h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
transmat=self.transmat, algorithm="map",)
h.emissionprob_ = self.emissionprob
logprob, state_sequence = h.decode(observations)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
observations = [0, 1, 2]
state_sequence = self.h.predict(observations)
posteriors = self.h.predict_proba(observations)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
np.zeros((self.n_components - 2, self.n_symbols)))
self.assertEqual(h.n_symbols, self.n_symbols)
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
nobs = len(idx)
obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
ll, posteriors = self.h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
def test_sample(self, n=1000):
samples = self.h.sample(n)[0]
self.assertEqual(len(samples), n)
self.assertEqual(len(np.unique(samples)), self.n_symbols)
def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
h = self.h
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = hmm.normalize(
self.prng.rand(self.n_components, self.n_symbols), axis=1)
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10)[0] for x in range(10)]
# use init_function to initialize paramerters
learner._init(train_obs, params)
trainll = train_hmm_and_keep_track_of_log_likelihood(
learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = mixture.GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = hmm.normalize(prng.rand(n_mix))
return g
class GMMHMMBaseTester(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms_ = []
for state in range(self.n_components):
self.gmms_.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_attributes(self):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
assert_array_almost_equal(h.startprob_, self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
2 * self.startprob)
self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
self.assertRaises(ValueError, h.__setattr__, 'startprob_',
np.zeros((self.n_components - 2, self.n_features)))
h.transmat_ = self.transmat
assert_array_almost_equal(h.transmat_, self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
2 * self.transmat)
self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
self.assertRaises(ValueError, h.__setattr__, 'transmat_',
np.zeros((self.n_components - 2, self.n_components)))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
nobs = len(refstateseq)
obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
ll, posteriors = h.score_samples(obs)
self.assertEqual(posteriors.shape, (nobs, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
viterbi_ll, stateseq = h.decode(obs)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, self.covariance_type,
startprob=self.startprob, transmat=self.transmat,
gmms=self.gmms_)
samples = h.sample(n)[0]
self.assertEqual(samples.shape, (n, self.n_features))
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = hmm.normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms_
# Create training data by sampling from the HMM.
train_obs = [h.sample(n=10, random_state=self.prng)[0]
for x in range(10)]
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(train_obs)
h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
trainll = train_hmm_and_keep_track_of_log_likelihood(
h, train_obs, n_iter=n_iter, params=params)[1:]
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
obs = [self.prng.rand(3, self.n_features),
self.prng.rand(4, self.n_features),
self.prng.rand(5, self.n_features)]
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
covariance_type = 'full'
def test_normalize_1D():
A = rng.rand(2) + 1.0
for axis in range(1):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
def test_normalize_3D():
A = rng.rand(2, 2, 2) + 1.0
for axis in range(3):
Anorm = hmm.normalize(A, axis)
assert np.all(np.allclose(Anorm.sum(axis), 1.0))
| bsd-3-clause |
xiaohan2012/lst | util.py | 1 | 4112 | import codecs
import ujson as json
import math
import gensim
import collections
import functools
import pandas as pd
from datetime import datetime, timedelta
from collections import defaultdict
def load_items_by_line(path):
with codecs.open(path, 'r', 'utf8') as f:
items = set([l.strip()
for l in f])
return items
def load_json_by_line(path):
return map(json.loads, load_items_by_line(path))
def load_id2obj_dict(path, id_key):
try:
df = pd.read_json(path)
except (ValueError, IOError):
df = pd.read_pickle(path)
d = defaultdict(lambda: {'id': 'unknown', 'name': 'unknown',
'subject': '', 'body': ''})
for _, r in df.iterrows():
d[r[id_key]] = r.to_dict()
return d
def get_datetime(obj):
if isinstance(obj, datetime):
return obj
elif (isinstance(obj, float) or
isinstance(obj, int)) and not math.isnan(obj):
return datetime.fromtimestamp(obj)
elif isinstance(obj, long):
return datetime.fromtimestamp(obj / 1000)
elif isinstance(obj, basestring):
patterns = ['%Y-%m-%d %X.%f', '%Y-%m-%d %X']
ok = False
for p in patterns:
try:
dt = datetime.strptime(
obj, p
)
ok = True
except ValueError:
continue
if ok:
return dt
else:
raise ValueError('Bad datetime format for {}'.format(patterns))
else:
raise TypeError('Unacceptable type {}, {}'.format(type(obj), obj))
def compose(*functions):
def inner(arg):
for f in functions:
arg = f(arg)
return arg
return inner
def json_dump(obj, path):
with codecs.open(path, 'w', 'utf8') as f:
f.write(json.dumps(obj))
def json_load(path):
with codecs.open(path, 'r', 'utf8') as f:
return json.load(f)
def load_summary_related_data(interactions_path, people_path,
corpus_dict_path, lda_model_path):
try:
interactions = json.load(open(interactions_path))
except ValueError:
interactions = pd.read_pickle(interactions_path)
try:
people_info = json.load(open(people_path))
except ValueError:
people_info = pd.read_pickle(people_path).to_dict(orient='records')
dictionary = gensim.corpora.dictionary.Dictionary.load(
corpus_dict_path
)
# lda = gensim.models.ldamodel.LdaModel.load(
# lda_model_path
# )
lda = gensim.models.wrappers.LdaMallet.load(lda_model_path)
return interactions, people_info, dictionary, lda
class memoized(object):
"""
Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
# print(args)
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
# print('cache hit')
return self.cache[args]
else:
# print('cache miss')
value = self.func(*args)
self.cache[args] = value
# print('saving result')
return value
def __repr__(self):
"""Return the function's docstring.
"""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods.
"""
return functools.partial(self.__call__, obj)
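# Typical use of @memoized (illustrative only): decorate a pure function whose
# positional arguments are hashable, e.g.
#     @memoized
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
# Repeated calls with the same argument then return the cached value instead
# of recomputing it.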
def format_timestamp(s, format='%Y-%m-%d'):
return datetime.fromtimestamp(s).strftime(format)
def smart_read_df(path):
if path.endswith('.json'):
return pd.read_json(path)
else:
return pd.read_pickle(path)
def parse_time_delta(s):
number, unit = s.split('-')
number = int(number)
return timedelta(**{unit: number})
| mit |
riastrad/newSeer | driver/dum.py | 1 | 2292 | #!/usr/bin/env python3
#
# @Author: Josh Erb <josh.erb>
# @Date: 06-Mar-2017 15:03
# @Email: josh.erb@excella.com
# @Last modified by: josh.erb
# @Last modified time: 24-Apr-2017 22:04
"""
Quick script to glob up a bunch of tsv files and insert them into a local
sqlite3 database.
Will only work if files have been previously cleaned using the cleaner.py
script. Once this is completed and results have been validated, feel free to
delete the raw .csv files.
"""
#######################################
# IMPORTS
#######################################
import os
import sqlite3
import pandas as pd
from tqdm import tqdm
from glob import glob
#######################################
# CONSTANTS
#######################################
data_path = os.path.abspath('data')
data_files = glob(os.path.join(data_path, '*.tsv'))
#######################################
# FUNCTIONS
#######################################
def insert_data(file_list=data_files):
"""
Uses pandas and sqlite to insert a dataframe into your local database.
    The sqlite3 connection to the local news.db database is opened and closed
    inside this function.
"""
    # initialize the db connection
conn = sqlite3.connect(os.path.join(data_path, 'news.db'))
# add manipulation code here
# (tqdm call used to indicate progress in CLI)
for data in tqdm(file_list, desc='Dumping Progress'):
# load file into df object
df = pd.read_csv(data, sep='\t')
# use the connection to insert the df into our db table
df.to_sql('news', conn, if_exists='append')
# Save changes (necessary?)
conn.commit()
# close out connection
conn.close()
def drop_data():
"""
Quick function to glob the .csv files and remove them quickly. Might not
use this one for a while...just to make sure nothing is
wrong with data cleaning.
"""
files = glob(os.path.join(data_path, '*.csv'))
# iterate over all the .csv files and drop each one
for data in files:
os.remove(data)
def main():
"""
Primary execution function.
"""
insert_data()
#######################################
# EXECUTION
#######################################
if __name__ == '__main__':
main()
| mit |
Garrett-R/scikit-learn | examples/applications/plot_stock_market.py | 29 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to fluctuate together over the course of a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful for explaining
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The outputs of the 3 models are combined in a 2D graph where the nodes
represent the stocks and the edges the links between them:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
    'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WAG': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initialized with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
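# Note added for clarity: the two in-place scalings above normalise the
# precision matrix by its diagonal, giving (up to sign) the partial
# correlations
#   partial_corr[i, j] ~ precision[i, j] / sqrt(precision[i, i] * precision[j, j])
# Only their absolute values are used for the edges below, so the sign
# convention does not affect the plot.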
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
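# (Added note) Heuristic: the neighbour that is closest in the y-direction
# decides on which horizontal side the label is placed (away from that
# neighbour), and the neighbour closest in x decides the vertical side. This
# reduces, but does not eliminate, label overlap.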
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
CforED/Machine-Learning | examples/ensemble/plot_gradient_boosting_regression.py | 87 | 2510 | """
============================
Gradient Boosting regression
============================
Demonstrate Gradient Boosting on the Boston housing dataset.
This example fits a Gradient Boosting model with least squares loss and
500 regression trees of depth 4.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
###############################################################################
# Load data
boston = datasets.load_boston()
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
###############################################################################
# Fit regression model
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
###############################################################################
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
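# (Added note) staged_predict yields the model's predictions after each
# boosting iteration, so test_score[i] tracks how the held-out deviance evolves
# as trees are added -- useful for spotting where the model starts to overfit.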
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
###############################################################################
# Plot feature importance
feature_importance = clf.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, boston.feature_names[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
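# (Added note) Each fold's TPR curve was interpolated onto the common mean_fpr
# grid before being accumulated; forcing the first point to 0 (inside the loop)
# and the last point to 1 (here) keeps the averaged curve anchored at (0, 0)
# and (1, 1).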
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
lakshayg/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 67 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
"""Trains, evaluates, and exports a multivariate model."""
estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=[], num_features=5)
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
# Larger window sizes generally produce a better covariance matrix.
reader, batch_size=4, window_size=64)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
values = [current_state["observed"]]
times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
# Export the model so we can do iterative prediction and filtering without
# reloading model checkpoints.
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_savedmodel(
export_directory, input_receiver_fn)
with tf.Graph().as_default():
numpy.random.seed(1) # Make the example a bit more deterministic
with tf.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
for _ in range(100):
current_prediction = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=current_state, signatures=signatures,
session=session, steps=1))
next_sample = numpy.random.multivariate_normal(
# Squeeze out the batch and series length dimensions (both 1).
mean=numpy.squeeze(current_prediction["mean"], axis=[0, 1]),
cov=numpy.squeeze(current_prediction["covariance"], axis=[0, 1]))
# Update model state so that future predictions are conditional on the
# value we just sampled.
filtering_features = {
tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
tf.contrib.timeseries.FilteringResults.TIMES],
tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
None, None, :]}
current_state = (
tf.contrib.timeseries.saved_model_utils.filter_continuation(
continue_from=current_state,
session=session,
signatures=signatures,
features=filtering_features))
values.append(next_sample[None, None, :])
times.append(current_state["times"])
all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
return all_times, all_observations
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
all_times, all_observations = multivariate_train_and_sample()
# Show where sampling starts on the plot
pyplot.axvline(1000, linestyle="dotted")
pyplot.plot(all_times, all_observations)
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
Achuth17/scikit-learn | examples/cluster/plot_lena_ward_segmentation.py | 271 | 1998 | """
===============================================================
A demo of structured Ward hierarchical clustering on Lena image
===============================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
###############################################################################
# Generate data
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
X = np.reshape(lena, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*lena.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward', connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, lena.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
dhiapet/PyMC3 | pymc3/glm/glm.py | 14 | 5720 | import numpy as np
from ..core import *
from ..distributions import *
from ..tuning.starting import find_MAP
import patsy
import theano
import pandas as pd
from collections import defaultdict
from pandas.tools.plotting import scatter_matrix
from . import families
def linear_component(formula, data, priors=None,
intercept_prior=None,
regressor_prior=None,
init_vals=None, family=None,
model=None):
"""Create linear model according to patsy specification.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : statsmodels.family
Link function to pass to statsmodels (init has to be True).
See `statsmodels.api.families`
Default: identity
Output
------
(y_est, coeffs) : Estimate for y, list of coefficients
Example
-------
# Logistic regression
y_est, coeffs = glm('male ~ height + weight',
htwt_data,
family=glm.families.Binomial(link=glm.family.logit))
y_data = Bernoulli('y', y_est, observed=data.male)
"""
if intercept_prior is None:
intercept_prior = Normal.dist(mu=0, tau=1.0E-12)
if regressor_prior is None:
regressor_prior = Normal.dist(mu=0, tau=1.0E-12)
if priors is None:
priors = defaultdict(None)
# Build patsy design matrix and get regressor names.
_, dmatrix = patsy.dmatrices(formula, data)
reg_names = dmatrix.design_info.column_names
if init_vals is None:
init_vals = {}
# Create individual coefficients
model = modelcontext(model)
coeffs = []
if reg_names[0] == 'Intercept':
prior = priors.get('Intercept', intercept_prior)
coeff = model.Var(reg_names.pop(0), prior)
if 'Intercept' in init_vals:
coeff.tag.test_value = init_vals['Intercept']
coeffs.append(coeff)
for reg_name in reg_names:
prior = priors.get(reg_name, regressor_prior)
coeff = model.Var(reg_name, prior)
if reg_name in init_vals:
coeff.tag.test_value = init_vals[reg_name]
coeffs.append(coeff)
y_est = theano.dot(np.asarray(dmatrix), theano.tensor.stack(*coeffs)).reshape((1, -1))
return y_est, coeffs
def glm(*args, **kwargs):
"""Create GLM after Patsy model specification string.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : Family object
Distribution of likelihood, see pymc3.glm.families
(init has to be True).
Output
------
vars : List of created random variables (y_est, coefficients etc)
Example
-------
# Logistic regression
vars = glm('male ~ height + weight',
data,
family=glm.families.Binomial(link=glm.families.logit))
"""
model = modelcontext(kwargs.get('model'))
family = kwargs.pop('family', families.Normal())
call_find_map = kwargs.pop('find_MAP', True)
formula = args[0]
data = args[1]
y_data = np.asarray(patsy.dmatrices(formula, data)[0]).T
y_est, coeffs = linear_component(*args, **kwargs)
family.create_likelihood(y_est, y_data)
return [y_est] + coeffs
def plot_posterior_predictive(trace, eval=None, lm=None, samples=30, **kwargs):
"""Plot posterior predictive of a linear model.
:Arguments:
trace : <array>
Array of posterior samples with columns
eval : <array>
Array over which to evaluate lm
lm : function <default: linear function>
Function mapping parameters at different points
to their respective outputs.
input: point, sample
output: estimated value
samples : int <default=30>
How many posterior samples to draw.
Additional keyword arguments are passed to pylab.plot().
"""
import matplotlib.pyplot as plt
if lm is None:
lm = lambda x, sample: sample['Intercept'] + sample['x'] * x
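        # (Added note) This default assumes the trace contains variables named
        # 'Intercept' and 'x', i.e. a model built from a formula such as
        # 'y ~ x'; pass a custom `lm` for any other parameterisation.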
if eval is None:
eval = np.linspace(0, 1, 100)
# Set default plotting arguments
if 'lw' not in kwargs and 'linewidth' not in kwargs:
kwargs['lw'] = .2
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['c'] = 'k'
for rand_loc in np.random.randint(0, len(trace), samples):
rand_sample = trace[rand_loc]
plt.plot(eval, lm(eval, rand_sample), **kwargs)
# Make sure to not plot label multiple times
kwargs.pop('label', None)
plt.title('Posterior predictive')
| apache-2.0 |
eickenberg/scikit-learn | sklearn/semi_supervised/label_propagation.py | 1 | 14906 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semisupervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice, but running them can be
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
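  For instance (illustration added here), ``LabelPropagation(kernel='rbf',
  gamma=20)`` builds the dense O(N^2) affinity matrix, while
  ``LabelPropagation(kernel='knn', n_neighbors=7)`` builds the sparse O(k*N)
  graph.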
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
`X_` : array, shape = [n_samples, n_features]
Input array.
`classes_` : array, shape = [n_classes]
The distinct labels used in classifying instances.
`label_distributions_` : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
`transduction_` : array, shape = [n_samples]
Label assigned to each item via the transduction.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
`X_` : array, shape = [n_samples, n_features]
Input array.
`classes_` : array, shape = [n_classes]
The distinct labels used in classifying instances.
`label_distributions_` : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
`transduction_` : array, shape = [n_samples]
Label assigned to each item via the transduction.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
zimenglan-sysu-512/pose_action_caffe | tools/train_svms.py | 42 | 13247 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print 'get_pos_examples: {:d}/{:d} {:.3f}s' \
.format(i + 1, len(roidb), _t.average_time)
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
# Initialize SVMs in a smart way. Not doing this because its such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
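        # (Added note) The three terms above recompute LinearSVC's primal
        # objective by hand: a class-weighted hinge loss on the positives, a
        # hinge loss on the negatives, and an L2 penalty on (w, b). Logging it
        # per retraining round gives a rough sanity check on the
        # hard-negative mining loop.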
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
return ((w * self.feature_scale, b * self.feature_scale),
pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_roidb()
print 'done'
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print 'Wrote svm model to: {:s}'.format(filename)
| mit |
tedunderwood/biographies | topicmodel/interpret/evaluate_hypotheses_docadjusted.py | 1 | 4227 | # evaluate_hypotheses_docadjusted.py
# This script evaluates our preregistered hypotheses using
# the doctopics file produced by MALLET.
import sys, csv
import numpy as np
import pandas as pd
from scipy.spatial.distance import euclidean, cosine
def getdoc(anid):
'''
Gets the docid part of a character id
'''
if '|' in anid:
thedoc = anid.split('|')[0]
else:
print('error', anid)
thedoc = anid
return thedoc
# MAIN starts
args = sys.argv
doctopic_path = args[1]
meta = pd.read_csv('../../metadata/filtered_fiction_plus_18c.tsv', sep = '\t', index_col = 'docid')
meta = meta[~meta.index.duplicated(keep = 'first')]
docsbyauthor = dict()
groupedbyauthor = meta.groupby('author')
for auth, group in groupedbyauthor:
docsbyauthor[auth] = group.index.tolist()
hypotheses = []
significant_persons = set()
with open('../../evaluation/hypotheses.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
ids = [row['firstsim'], row['secondsim'], row['distractor']]
for anid in ids:
if '_' in anid:
anid = anid.replace('_', '|')
significant_persons.add(anid)
hypotheses.append(row)
numtopics = 0
chardict = dict()
vectorsbydoc = dict()
with open(doctopic_path, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
charid = fields[1]
vector = np.array(fields[2 : ], dtype = 'float32')
docid = charid.split('|')[0]
if docid not in vectorsbydoc:
vectorsbydoc[docid] = []
vectorsbydoc[docid].append(vector)
if charid in significant_persons:
chardict[charid] = vector
# Make doc centroids
doc_centroids = dict()
for doc, group in vectorsbydoc.items():
centroid = np.sum(group, axis = 0) / len(group)
doc_centroids[doc] = centroid
right = 0
wrong = 0
cosright = 0
coswrong = 0
answers = []
def adjusted_vector(charid):
global chardict, doc_centroids
raw_vector = chardict[charid]
docid = getdoc(charid)
normal_vector = doc_centroids[docid]
divergence = raw_vector - normal_vector
if np.isnan(np.sum(divergence)):
print('error', charid)
return divergence
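# (Added note) "Doc-adjusted" means each character vector is expressed as its
# offset from the centroid of all characters in the same document, so the
# comparisons below measure how characters differ from their book's average
# rather than how the books differ from each other.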
for h in hypotheses:
first = adjusted_vector(h['firstsim'])
second = adjusted_vector(h['secondsim'])
distract = adjusted_vector(h['distractor'])
pair_euclid = euclidean(first, second)
pair_cos = cosine(first, second)
# first comparison
distraction1 = euclidean(first, distract)
distraction1cos = cosine(first, distract)
if distraction1 < pair_euclid:
wrong += 1
else:
right += 1
if distraction1cos < pair_cos:
coswrong += 1
answers.append([h['hypothesisnum'], h['secondsim'], h['firstsim'], h['distractor'], 'wrong'])
elif distraction1cos == pair_cos:
print('error')
answers.append([h['hypothesisnum'], h['secondsim'], h['firstsim'], h['distractor'], 'error'])
else:
answers.append([h['hypothesisnum'], h['secondsim'], h['firstsim'], h['distractor'], 'right'])
cosright += 1
# second comparison
distraction2 = euclidean(second, distract)
distraction2cos = cosine(second, distract)
if distraction2 < pair_euclid:
wrong += 1
else:
right += 1
if distraction2cos < pair_cos:
coswrong += 1
answers.append([h['hypothesisnum'], h['firstsim'], h['secondsim'], h['distractor'], 'wrong'])
elif distraction2cos == pair_cos:
print('error')
answers.append([h['hypothesisnum'], h['firstsim'], h['secondsim'], h['distractor'], 'error'])
else:
cosright += 1
answers.append([h['hypothesisnum'], h['firstsim'], h['secondsim'], h['distractor'], 'right'])
print('Euclid: ', right / (wrong + right))
print('Cosine: ', cosright / (coswrong + cosright))
user = input('Write to file? ')
if len(user) > 1:
outpath = 'answers/' + user + '.tsv'
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
f.write('index\tcomparand\thinge\tdistractor\tanswer\n')
for a in answers:
f.write('\t'.join(a) + '\n')
| mit |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/custom_cmap.py | 3 | 4967 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
"""
Example: suppose you want red to increase from 0 to 1 over the bottom
half, green to do the same over the middle half, and blue over the top
half. Then you would use:
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0))}
If, as in this example, there are no discontinuities in the r, g, and b
components, then it is quite simple: the second and third element of
each tuple, above, is the same--call it "y". The first element ("x")
defines interpolation intervals over the full range of 0 to 1, and it
must span that whole range. In other words, the values of x divide the
0-to-1 range into a set of segments, and y gives the end-point color
values for each segment.
Now consider the green. cdict['green'] is saying that for
0 <= x <= 0.25, y is zero; no green.
0.25 < x <= 0.75, y varies linearly from 0 to 1.
x > 0.75, y remains at 1, full green.
If there are discontinuities, then it is a little more complicated.
Label the 3 elements in each row in the cdict entry for a given color as
(x, y0, y1). Then for values of x between x[i] and x[i+1] the color
value is interpolated between y1[i] and y0[i+1].
Going back to the cookbook example, look at cdict['red']; because y0 !=
y1, it is saying that for x from 0 to 0.5, red increases from 0 to 1,
but then it jumps down, so that for x from 0.5 to 1, red increases from
0.7 to 1. Green ramps from 0 to 1 as x goes from 0 to 0.5, then jumps
back to 0, and ramps back to 1 as x goes from 0.5 to 1.
row i:   x  y0  y1
                /
               /
row i+1: x  y0  y1
Above is an attempt to show that for x in the range x[i] to x[i+1], the
interpolation is between y1[i] and y0[i+1]. So, y0[0] and y1[-1] are
never used.
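A minimal concrete illustration (added note; the numbers are chosen only for
this explanation, not taken from the dictionaries below):
    'red':   ((0.0, 0.0, 0.0),
              (0.5, 1.0, 0.7),
              (1.0, 1.0, 1.0))
reads: red ramps from 0 up to 1 as x goes from 0 to 0.5, drops abruptly to 0.7
at x = 0.5, and then ramps back up to 1 as x goes from 0.5 to 1.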
"""
cdict1 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 0.1),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 1.0),
(0.5, 0.1, 0.0),
(1.0, 0.0, 0.0))
}
cdict2 = {'red': ((0.0, 0.0, 0.0),
(0.5, 0.0, 1.0),
(1.0, 0.1, 1.0)),
'green': ((0.0, 0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.1),
(0.5, 1.0, 0.0),
(1.0, 0.0, 0.0))
}
cdict3 = {'red': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.8, 1.0),
(0.75,1.0, 1.0),
(1.0, 0.4, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.25,0.0, 0.0),
(0.5, 0.9, 0.9),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.4),
(0.25,1.0, 1.0),
(0.5, 1.0, 0.8),
(0.75,0.0, 0.0),
(1.0, 0.0, 0.0))
}
# Now we will use this example to illustrate 3 ways of
# handling custom colormaps.
# First, the most direct and explicit:
blue_red1 = LinearSegmentedColormap('BlueRed1', cdict1)
# Second, create the map explicitly and register it.
# Like the first method, this method works with any kind
# of Colormap, not just
# a LinearSegmentedColormap:
blue_red2 = LinearSegmentedColormap('BlueRed2', cdict2)
plt.register_cmap(cmap=blue_red2)
# Third, for LinearSegmentedColormap only,
# leave everything to register_cmap:
plt.register_cmap(name='BlueRed3', data=cdict3) # optional lut kwarg
x = np.arange(0, np.pi, 0.1)
y = np.arange(0, 2*np.pi, 0.1)
X, Y = np.meshgrid(x,y)
Z = np.cos(X) * np.sin(Y)
plt.figure(figsize=(10,4))
plt.subplots_adjust(wspace=0.3)
plt.subplot(1,3,1)
plt.imshow(Z, interpolation='nearest', cmap=blue_red1)
plt.colorbar()
plt.subplot(1,3,2)
cmap = plt.get_cmap('BlueRed2')
plt.imshow(Z, interpolation='nearest', cmap=cmap)
plt.colorbar()
# Now we will set the third cmap as the default. One would
# not normally do this in the middle of a script like this;
# it is done here just to illustrate the method.
plt.rcParams['image.cmap'] = 'BlueRed3'
# Also see below for an alternative, particularly for
# interactive use.
plt.subplot(1,3,3)
plt.imshow(Z, interpolation='nearest')
plt.colorbar()
# Or as yet another variation, we could replace the rcParams
# specification *before* the imshow with the following *after*
# imshow:
#
# plt.set_cmap('BlueRed3')
#
# This sets the new default *and* sets the colormap of the last
# image-like item plotted via pyplot, if any.
plt.suptitle('Custom Blue-Red colormaps')
plt.show()
| gpl-2.0 |
kcavagnolo/astroML | book_figures/appendix/fig_fft_text_example.py | 3 | 2376 | """
Example of a Fourier Transform
------------------------------
Figure E.1
An example of approximating the continuous Fourier transform of a function
using the fast Fourier transform.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import fftpack
from astroML.fourier import FT_continuous, sinegauss, sinegauss_FT
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Choose parameters for the wavelet
N = 10000
t0 = 5
f0 = 2
Q = 2
#------------------------------------------------------------
# Compute the wavelet on a grid of times
Dt = 0.01
t = t0 + Dt * (np.arange(N) - N / 2)
h = sinegauss(t, t0, f0, Q)
#------------------------------------------------------------
# Approximate the continuous Fourier Transform
f, H = FT_continuous(t, h)
rms_err = np.sqrt(np.mean(abs(H - sinegauss_FT(f, t0, f0, Q)) ** 2))
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.25)
# plot the wavelet
ax = fig.add_subplot(211)
ax.plot(t, h.real, '-', c='black', label='$Re[h]$', lw=1)
ax.plot(t, h.imag, ':', c='black', label='$Im[h]$', lw=1)
ax.legend()
ax.set_xlim(2, 8)
ax.set_ylim(-1.2, 1.2)
ax.set_xlabel('$t$')
ax.set_ylabel('$h(t)$')
# plot the Fourier transform
ax = fig.add_subplot(212)
ax.plot(f, H.real, '-', c='black', label='$Re[H]$', lw=1)
ax.plot(f, H.imag, ':', c='black', label='$Im[H]$', lw=1)
ax.text(0.55, 1.5, "RMS Error = %.2g" % rms_err)
ax.legend()
ax.set_xlim(0.5, 3.5)
ax.set_ylim(-1.9, 1.9)
ax.set_xlabel('$f$')
ax.set_ylabel('$H(f)$')
plt.show()
| bsd-2-clause |
mne-tools/mne-tools.github.io | 0.17/_downloads/440494db0a9c51c8c5092ad97fd1ce2a/plot_topo_customized.py | 13 | 1927 | """
========================================
Plot custom topographies for MEG sensors
========================================
This example exposes the `iter_topography` function that makes it
very easy to generate custom sensor topography plots.
Here we will plot the power spectrum of each channel on a topographic
layout.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.viz import iter_topography
from mne import io
from mne.time_frequency import psd_welch
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
picks = mne.pick_types(raw.info, meg=True, exclude=[])
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 2, 20 # look at frequencies between 2 and 20Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
psds, freqs = psd_welch(raw, picks=picks, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax)
psds = 20 * np.log10(psds) # scale to dB
def my_callback(ax, ch_idx):
"""
This block of code is executed once you click on one of the channel axes
in the plot. To work with the viz internals, this function should only take
two parameters, the axis and the channel or data index.
"""
ax.plot(freqs, psds[ch_idx], color='red')
    ax.set_xlabel('Frequency (Hz)')
    ax.set_ylabel('Power (dB)')
for ax, idx in iter_topography(raw.info,
fig_facecolor='white',
axis_facecolor='white',
axis_spinecolor='white',
on_pick=my_callback):
ax.plot(psds[idx], color='red')
plt.gcf().suptitle('Power spectral densities')
plt.show()
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/scalar/timedelta/test_formats.py | 9 | 1068 | # -*- coding: utf-8 -*-
import pytest
from pandas import Timedelta
@pytest.mark.parametrize('td, expected_repr', [
(Timedelta(10, unit='d'), "Timedelta('10 days 00:00:00')"),
(Timedelta(10, unit='s'), "Timedelta('0 days 00:00:10')"),
(Timedelta(10, unit='ms'), "Timedelta('0 days 00:00:00.010000')"),
(Timedelta(-10, unit='ms'), "Timedelta('-1 days +23:59:59.990000')")])
def test_repr(td, expected_repr):
assert repr(td) == expected_repr
@pytest.mark.parametrize('td, expected_iso', [
(Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10,
nanoseconds=12), 'P6DT0H50M3.010010012S'),
(Timedelta(days=4, hours=12, minutes=30, seconds=5), 'P4DT12H30M5S'),
(Timedelta(nanoseconds=123), 'P0DT0H0M0.000000123S'),
# trim nano
(Timedelta(microseconds=10), 'P0DT0H0M0.00001S'),
# trim micro
(Timedelta(milliseconds=1), 'P0DT0H0M0.001S'),
# don't strip every 0
(Timedelta(minutes=1), 'P0DT0H1M0S')])
def test_isoformat(td, expected_iso):
assert td.isoformat() == expected_iso
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/calibration.py | 18 | 19402 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .utils.fixes import signature
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .model_selection import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
it is assumed that base_estimator has been fitted already and all
data is used for calibration. Note that data for fitting the
classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`sklearn.model_selection.StratifiedKFold` is used. If ``y``
is neither binary nor multiclass, :class:`sklearn.model_selection.KFold`
is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
| bsd-3-clause |
bmazin/ARCONS-pipeline | examples/Pal2014_throughput/throughputCalc_aperturePhot.py | 1 | 12594 | from util import utils
import sys,os
import tables
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from util.ObsFile import ObsFile
from util import MKIDStd
from util.readDict import readDict
from util.rebin import rebin
import matplotlib
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
from numpy import exp
def cleanSpectrum(x,y,objectName, wvlBinEdges):
#locations and widths of absorption features in Angstroms
#features = [3890,3970,4099,4340,4860,6564,6883,7619]
#widths = [50,50,50,50,50,50,50,50]
#for i in xrange(len(features)):
# #check for absorption feature in std spectrum
# ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0]
# if len(ind)!=0:
# ind = ind[len(ind)/2]
# #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be)
# if y[ind]<y[ind+1] and y[ind]<y[ind-1]:
# #cut out width[i] around feature[i]
# inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i]))
# x = x[inds]
# y = y[inds]
#fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms
fraction = 0 #4.0/5.0
newx = np.arange(int(x[fraction*len(x)]),20000)
#slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)])
#print "Guess at exponential slope is %f"%(slopeguess)
#guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess
#guess = [guess_a, guess_b, guess_c]
fitx = x[fraction*len(x)::]
fity = y[fraction*len(x)::]
#exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t)
#params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000)
#A, x0, t= params
#print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t)
#best_fit = lambda fx: A * np.exp((fx-x0)*t)
#calcx = np.array(newx,dtype=float)
#newy = best_fit(calcx)
#normalizing
norm = fity.max()
fity/=norm
guess_a, guess_b = 1/(2*h*c**2/1e-9), 5600 #Constant, Temp
guess = [guess_a, guess_b]
blackbody = lambda fx, N, T: N * 2*h*c**2 / (fx)**5 * (exp(h*c/(k*T*(fx))) - 1)**-1 # Planck Law
#blackbody = lambda fx, N, T: N*2*c*k*T/(fx)**4 #Rayleigh Jeans tail
#blackbody = lambda fx, N, T: N*2*h*c**2/(fx**5) * exp(-h*c/(k*T*fx)) #Wein Approx
params, cov = curve_fit(blackbody, fitx*1.0e-8, fity, p0=guess, maxfev=2000)
N, T= params
print "N = %s\nT = %s\n"%(N, T)
best_fit = lambda fx: N * 2*h*c**2 / (fx)**5 * (exp(h*c/(k*T*(fx))) - 1)**-1 #Planck Law
#best_fit = lambda fx: N*2*c*k*T/(fx)**4 # Rayleigh Jeans Tail
#best_fit = lambda fx: N*2*h*c**2/(fx**5) * exp(-h*c/(k*T*fx)) #Wein Approx
calcx = np.array(newx,dtype=float)
bbfit = best_fit(calcx*1.0E-8)
calcx = np.array(newx,dtype=float)
newy = best_fit(calcx*1.0E-8)
fity*=norm
newy*=norm
plt.plot(calcx[3.0*len(fitx)/4.0::],newy[3.0*len(fitx)/4.0::]*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5) #plot fit
    #func = interpolate.splrep(x[fraction*len(x):],y[fraction*len(x):],s=smooth)
#newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1])
#newy = interpolate.splev(newx,func)
wl = np.concatenate((x,newx[newx>max(x)]))
flux = np.concatenate((y,newy[newx>max(x)]))
#new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins
#R=7.0 at 4500
#R=E/dE -> dE = R/E
dE = 0.3936 #eV
start = 1000 #Angs
stop = 25000 #Angs
enBins = ObsFile.makeWvlBins(dE,start,stop)
rebinned = rebin(wl,flux,enBins)
re_wl = rebinned[:,0]
re_flux = rebinned[:,1]
plt.plot(re_wl,re_flux*1E15,linestyle="o", marker="o",markersize=6) #plot rebinned spectrum with exp tail
re_wl = re_wl[np.isnan(re_flux)==False]
re_flux = re_flux[np.isnan(re_flux)==False]
start1 = wvlBinEdges[0]
stop1 = wvlBinEdges[-1]
#regrid downsampled data
new_wl = np.arange(start1,stop1)
#print re_wl
#print re_flux
#print new_wl
#weight=1.0/(re_flux)**(2/1.00)
print len(re_flux)
weight = np.ones(len(re_flux))
#decrease weights near peak
ind = np.where(re_flux == max(re_flux))[0]
weight[ind] = 0.3
for p in [1,2,3]:
if p==1:
wt = 0.3
elif p==2:
wt = 0.6
elif p==3:
wt = 0.7
try:
weight[ind+p] = wt
except IndexError:
pass
try:
if ind-p >= 0:
weight[ind-p] = wt
except IndexError:
pass
#change weights to set how tightly fit must match data points
#weight[-4:] = 1.0
#weight = [0.7,0.7,0.7,0.7,0.7,0.7,0.7,0.7,0.7]
weight = 0.7*np.ones((len(re_wl)))
#print len(weight)
#weight = re_flux/min(re_flux)
#weight = 1.0/weight
#weight = weight/max(weight)
print weight
f = interpolate.splrep(re_wl,re_flux,w=weight,k=2,s=0)#max(re_flux)**300)
new_flux = interpolate.splev(new_wl,f,der=0)
return new_wl, new_flux
def aperture(startpx,startpy,radius=7):
r = radius
length = 2*r
height = length
allx = xrange(startpx-int(np.ceil(length/2.0)),startpx+int(np.floor(length/2.0))+1)
ally = xrange(startpy-int(np.ceil(height/2.0)),startpy+int(np.floor(height/2.0))+1)
pixx = []
pixy = []
mask=np.ones((46,44))
for x in allx:
for y in ally:
if (np.abs(x-startpx))**2+(np.abs(y-startpy))**2 <= (r)**2 and 0 <= y and y < 46 and 0 <= x and x < 44:
mask[y,x]=0.
return mask
c=3.00E10 #cm/s
h=6.626E-27 #erg*s
k=1.3806488E-16 #erg/K
param = readDict()
if len(sys.argv)<2:
print "Provide file name to fit. Syntax >>python throughputCalc_aperturePhot.py objectparams.dict [filenumber]"
sys.exit(1)
#read in parameter file as command line argument
param.read_from_file(sys.argv[1])
#provide optional file number if the object in the param file has alternate .npz files to be specified individually
fileNum = None
if len(sys.argv)>2:
fileNum = "_"+str(sys.argv[2])
npzfitpsf = param['npzfitpsf']
if fileNum != None:
npzfitpsf = npzfitpsf.split('.')[0]+fileNum+'.'+npzfitpsf.split('.')[1]
FramesPerFile = param['FramesPerFile']
guessX = param['guessX'][0]
guessY = param['guessY'][0]
objectName = param['objectName']
#SETUP PLOTTING
#matplotlib.rcParams.update({'font.size':12, 'font.family': 'sans-serif','sans-serif':['Helvetica']})
#plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#plt.rc('text',usetex=True)
t = np.load(npzfitpsf)
energyBinWidth = 0.1
wvlStart = 3000
wvlStop = 25000
wvlBinEdges = ObsFile.makeWvlBins(energyBinWidth,wvlStart,wvlStop)
nWvlBins = len(wvlBinEdges)-1
binWidths = np.empty(nWvlBins)
for i in xrange(nWvlBins):
binWidths[i] = wvlBinEdges[i+1]-wvlBinEdges[i]
#print binWidths
cube = t['cube']
wvls = t['wvls']
curve=[]
for iFrame in range(0,np.shape(cube)[0]):
print wvls[iFrame]
frame = cube[iFrame,:,:]
objectLight = np.zeros((np.shape(frame)),dtype = float)
skyLight = np.zeros((np.shape(frame)),dtype = float)
nanMask = np.isnan(frame)
err = np.sqrt(frame)
apertureMask = aperture(guessX,guessY,radius=14)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(wvls[iFrame])
im = ax.matshow(apertureMask, cmap = cm.get_cmap('rainbow'))
fig.colorbar(im)
#plt.show()
objectLight[:,:] = frame[:,:]
objectLight[apertureMask!=0] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(wvls[iFrame])
im = ax.matshow(objectLight, cmap = cm.get_cmap('rainbow'))
fig.colorbar(im)
#plt.show()
skyLight[:,:] = frame[:,:]
skyLight[apertureMask==0] = np.nan
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(wvls[iFrame])
im = ax.matshow(skyLight, cmap = cm.get_cmap('rainbow'))
fig.colorbar(im)
#plt.show()
skyLightm = np.ma.masked_array(skyLight, [np.isnan(x) for x in skyLight])
skyLevel = np.ma.median(skyLightm)
print "sky median = ", skyLevel
skySubtracted = objectLight - skyLevel
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(wvls[iFrame])
im = ax.matshow(skySubtracted, cmap = cm.get_cmap('rainbow'))
fig.colorbar(im)
#plt.show()
countsPerSec = np.nansum(skySubtracted)
print "aperture sum = ", countsPerSec
curve.append(countsPerSec)
curve = np.array(curve)
curve /= binWidths #spectrum is now in counts/s/Angs
np.savez('%s_CountsPerSecondPerBin.npz'%(objectName.strip()),counts=curve,binEdges=wvlBinEdges,binWidths=binWidths,centerWvls=wvls)
diam = 510.55 #5 meter telescope
area = np.pi * ((diam/2.0)**2 -(183/2.0)**2) #secondary obstruction diameter 1.83m
curve/= area #spectrum is now in counts/s/Angs/cm^2
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(4000,25000)
#ax.set_ylim(0,1E-5)
plt.plot(wvls, curve)
plt.xlabel(ur"Wavelength [\AA]")
plt.ylabel(ur"ARCONS measured Spectrum (Counts/s/cm2/Angs)")
plt.savefig("FluxCal_RawSpectrumCounts.eps",format='eps')
#convert ARCONS measured spectrum to ergs/s/cm^2/A
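#(each detected count carries an energy E = h*c/lambda; c*1E8 converts c from
# cm/s to Angstroms/s, so h*(c*1E8)/wvls is the photon energy in ergs)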
curve*=h*(c*1E8)/wvls
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(4000,25000)
#ax.set_ylim(0,0.35E-16)
plt.plot(wvls, curve)
plt.xlabel(ur"Wavelength [\AA]")
plt.ylabel(ur"ARCONS measured Spectrum (Flambda)")
plt.savefig("FluxCal_RawSpectrumFlux.eps",format='eps')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim(4000,25000)
#Begin MKIDStd loading
#objectName = "lds749b"
#import the known spectrum of the calibrator and rebin to the histogram parameters given
#must be imported into array with dtype float so division later does not have error
std = MKIDStd.MKIDStd()
a = std.load(objectName)
a = std.countsToErgs(a)
x = a[:,0]
y = np.array(a[:,1]) #std object spectrum in counts/s/Angs/cm^2
#convert from counts/s to ergs/s
#y=y*(h*(c*1E8)/x)
#End MKIDStd loading
plt.plot(x,y*1E15,linewidth=1,color='grey',alpha=0.75)
newwl, newflux = cleanSpectrum(x,y,objectName,wvlBinEdges)
print newwl
print newflux
#plt.plot(newwl,newflux*1E15,color = 'red')
#plt.show()
newa = rebin(newwl,newflux,wvlBinEdges)
x = newa[:,0]
y = newa[:,1]
plt.step(x,y*1E15,color = 'black',where='mid')
plt.legend(['LDS749B Spectrum','BB Fit','Rebinned Std Spectrum','Resampled Std Spectrum'],'upper right', numpoints=1)
plt.xlabel(ur"Wavelength [\AA]")
plt.ylabel(ur"Flux [10$^{-15}$ ergs/s/cm$^{2}$/\AA]")
plt.ylim(0,15)
plt.savefig('FluxCal_StdSpectrum.eps',format='eps')
#plt.show()
bvrwvls = [4450, 5510, 6580]#center wvls for b v and r Johnson filters
widths = [940/2.0, 880/2.0,1380/2.0] #filter FWHMs
bvrthru = [.23,.24,.35] #as calculated by Pal2013 throughput code
errors = [.04,.04,.04] #as calculated by Pal2013 throughput code (need to get real numbers, these are fake now)
#load QE file for plotting
QEFileName = "avgQE_20131125-202654.txt"
QEfile = os.environ['ARCONS_PIPELINE_PATH']+'/util/data/'+QEFileName
fdata = np.loadtxt(QEfile,dtype=float)
qewvls = np.array(fdata[:,0])*10.0 #convert from nm to Angstroms
QEcurve = np.array(fdata[:,1])
fig = plt.figure()
ax = fig.add_subplot(111)
#plt.errorbar(bvrwvls, np.array(bvrthru)*100, xerr = widths, yerr=np.array(errors)*100, fmt='o',color='black')
#ax.plot(qewvls, QEcurve*100,linestyle="--",color='black')
#ax.plot(wvls,(curve/y)*100,'black')
#ax.set_ylim(4E-3,0.04)
ax.set_ylim(0,5)
ax.set_xlim(4000,11000)
plt.plot(wvls, (curve/y)*100)
plt.xlabel(ur"Wavelength [\AA]")
plt.ylabel(ur"Throughput [\%]")
multqewvls = [4500, 5500, 6500]
multqe = [QEcurve[qewvls==4500][0]*bvrthru[0], QEcurve[qewvls==5500][0]*bvrthru[1], QEcurve[qewvls==6500][0]*bvrthru[2]]
#plt.errorbar(multqewvls, np.array(multqe)*100, color='blue',fmt='o')
print multqe
#plt.title("Sensitivity Curve")
#plt.legend(['Telescope BVR Throughput','ARCONS QE', 'Total QE (Telescope + ARCONS)'],'upper right', numpoints=1)
#ax.set_yscale('log')
plt.savefig("FluxCal_SensitivityCurve.eps",format='eps')
#plt.show()
np.savez('%s_throughput.npz'%(objectName.strip()),throughput=curve/y,wvls=wvls)
| gpl-2.0 |
RuthAngus/chronometer | chronometer/chronometer.py | 1 | 16573 | """
Now use Gibbs sampling to update individual star parameters and global gyro
parameters.
"""
import os
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from isochrones import StarModel
from isochrones.mist import MIST_Isochrone
import h5py
import corner
import priors
from models import gc_model, gyro_model, action_age
import emcee
from utils import replace_nans_with_inits, vk2teff, make_param_dict, \
parameter_assignment, pars_and_mods, transform_parameters
plotpar = {'axes.labelsize': 18,
'font.size': 10,
'legend.fontsize': 18,
'xtick.labelsize': 18,
'ytick.labelsize': 18,
'text.usetex': True}
plt.rcParams.update(plotpar)
def get_n_things(mods, params):
"""
Figure out number of stars, number of global params, etc.
"""
N, nind = len(mods), 5
ngyro = 3
nglob = 4
return N, ngyro, nglob, nind
def lnlike(params, *args):
"""
Probability of age and model parameters given rotation period and colour.
Parameters to pass to the models are selected using the par_inds list in
*args.
Whether a 'gyro parameter only' or 'single star isochrone parameters only'
gibbs step is determined according to the elements of par_inds.
----------
params: (array)
The array of log parameters: a, b, n, masses, ages, etc.
args = mods, periods, period_errs, bvs, bv_errs, par_inds
mods: (list)
A list of starmodel objects, one for each star.
par_inds: (array) args[-1]
The indices of the parameters to vary.
"""
mods, period, period_errs, bv, bv_errs, jz, jz_err, par_inds = args
N, ngyro, nglob, nind = get_n_things(mods, params)
gyro_lnlike, iso_lnlike, kin_lnlike = 0, 0, 0
if par_inds[0] == 0 and par_inds[1] == 1 and par_inds[2] == 2: # if gyro.
# Mask out stars without periods
m = (bv > .4) * (np.isfinite(period))
pi = np.arange(len(params))
par_inds_mask = np.concatenate((pi[:ngyro], pi[ngyro:][m]))
gyro_lnlike = sum(-.5*((period[m] - gyro_model(params[par_inds_mask],
bv[m]))
/period_errs[m])**2)
elif par_inds[0] == 3: # If kinematics
# Mask out stars without vertical actions
m = np.isfinite(jz)
pi = np.arange(len(params))
_m = np.ones(len(params), dtype=bool)
_m[1:] = m
kin_inds_mask = pi[_m]
kin_lnlike = action_age(params[kin_inds_mask], jz[m], jz_err[m])
# If not gyro but single stars
else:
mod_inds = par_inds[0] - nglob
ms = mods[mod_inds]
p = params*1
p[0] = np.exp(p[0])
p[1] = np.log10(1e9*np.exp(p[1]))
p[3] = np.exp(p[3])
iso_lnlike = ms.lnlike(p)
return gyro_lnlike + iso_lnlike + kin_lnlike
def lnprior(params, *args):
"""
lnprior on all parameters.
"""
par_inds = args[-1]
# if gyro.
if par_inds[0] == 0 and par_inds[1] == 1 and par_inds[2] == 2:
g_prior = priors.lng_prior(params[:3])
age_prior = sum([np.log(priors.age_prior(np.log10(1e9*np.exp(i))))
for i in params[3:]])
feh_prior, distance_prior, mAv = 0., 0., True
# if kinematics
elif par_inds[0] == 3:
age_prior = sum([np.log(priors.age_prior(np.log10(1e9*np.exp(i))))
for i in params[1:]])
g_prior, feh_prior, distance_prior, mAv = 0., 0., 0., True
# If individual stars
elif par_inds[0] > 3:
g_prior = 0.
age_prior = np.log(priors.age_prior(np.log10(1e9*np.exp(params[1]))))
feh_prior = np.log(priors.feh_prior(params[2]))
distance_prior = np.log(priors.distance_prior(np.exp(params[3])))
mAv = (0 <= params[4]) * (params[4] < 1) # Prior on A_v
m = (-20 < params) * (params < 20) # Broad bounds on all (log) params.
if sum(m) == len(m) and mAv:
return g_prior + age_prior + feh_prior + distance_prior
else:
return -np.inf
def lnprob(params, *args):
"""
The joint log-probability of age given gyro and iso parameters.
params: (array)
The parameter array.
args: (list)
args to pass to lnlike, including mods, a list of pre-computed star
model objects.
"""
# print("like = ", lnlike(params, *args), "prior = ",
# lnprior(params, *args))
return lnlike(params, *args) + lnprior(params, *args)
def MH(par, lnprob, nsteps, t, *args):
"""
This is where the full list of parameters is reduced to just those being
sampled.
params:
-------
par: (list)
The parameters.
nsteps: (int)
Number of samples.
t: (float)
The std of the proposal distribution.
args: (list)
A list of args to pass to the lnlike function.
mods, periods, period_errs, bvs, bv_errs, par_ind_list = args
returns:
--------
samples: (2d array)
The posterior samples for a single gibbs iteration.
par: (array)
The list of final parameters.
probs: (array)
The lnprob chain.
"""
par_inds = args[-1]
samples = np.zeros((nsteps, len(par[par_inds])))
accept, probs = 0, []
for i in range(nsteps):
par[par_inds], new_prob, acc = MH_step(par[par_inds], lnprob,
t, *args)
accept += acc
probs.append(new_prob)
samples[i, :] = par[par_inds]
if nsteps > 0:
print("Acceptance fraction = ", accept/float(nsteps))
return samples, par, probs
def MH_step(par, lnprob, t, *args, emc=False):
"""
A single Metropolis step.
if emc = True, the step is an emcee step instead.
emcee is run for 10 steps with 64 walkers and the final position is taken
as the step.
This is ridiculous but it should demonstrate that tuning is the problem.
"""
if emc:
nwalkers, ndim = 64, len(par)
p0 = [par + np.random.multivariate_normal(np.zeros((len(par))), t)
for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=args)
sampler.run_mcmc(p0, 10)
return sampler.chain[0][-1], sampler.lnprobability.T[-1, 0], 0
# newp = par + np.random.multivariate_normal(np.zeros((len(par))), t)
newp = par + np.random.multivariate_normal(np.zeros((len(par))), (t*0 + .01))
new_lnprob = lnprob(newp, *args)
alpha = np.exp(new_lnprob - lnprob(par, *args))
if alpha > 1:
par = newp*1
accept = 1
else:
u = np.random.uniform(0, 1)
if alpha > u:
par = newp*1
accept = 1
else:
accept = 0
new_lnprob = lnprob(par, *args)
return par, new_lnprob, accept
def gibbs_control(par, lnprob, nsteps, niter, t, par_inds_list, args):
"""
This function tells the metropolis hastings what parameters to sample in
and assembles the samples into an array. Because the ages are sampled
twice, the gyro age parameters are tacked onto the end of this array.
I'm not actually sure if this is the correct thing to do...
params:
------
par: (list)
The parameters.
lnprob: (function)
The lnprob function.
nsteps: (int)
Number of samples.
niter: (int)
The number of gibbs cycles to perform.
t: (float)
The covariance matrix of the proposal distribution.
par_inds_list: (list)
A list of lists of parameter indices, determining the parameters that
will be varied during the sampling.
args: (list)
A list of the args to parse to lnlike.
mods, period, period_errs, bv, bv_errs, par_inds = args
returns:
-------
samples: (np.array)
2d array of samples. (nsteps, ndim)
lnprobs: (np.array)
Array of lnprobs
"""
ndim = len(par)
nstars = len(args[0])
n_parameter_sets = len(par_inds_list)
# Final sample array. The 2*nstars is for the extra age samples.
all_samples = np.zeros((nsteps * niter, ndim + 2*nstars))
# Iterate over niter cycles of parameter sets.
probs = []
for i in range(niter): # Loop over Gibbs repeats.
print("Gibbs iteration ", i, "of ", niter)
print("Current parameter values = ", par)
for k in range(len(par_inds_list)): # loop over parameter sets.
print(k, "parameter set")
args[-1] = par_inds_list[k]
samples, par, pb = MH(par, lnprob, nsteps, t[k],
*args)
# save age samples separately: gyro samples.
if par_inds_list[k][0] == 0:
all_samples[nsteps*i:nsteps*(i+1), par_inds_list[k][:3]] = \
samples[:, :3]
all_samples[nsteps*i:nsteps*(i+1), -nstars:] = \
samples[:, 3:]
# save age samples separately: iso samples
if par_inds_list[k][0] == 3:
all_samples[nsteps*i:nsteps*(i+1), par_inds_list[k][3]] = \
samples[:, 0]
all_samples[nsteps*i:nsteps*(i+1), -nstars:] = \
samples[:, 1:]
else:
all_samples[nsteps*i:nsteps*(i+1), par_inds_list[k]] = samples
probs.append(pb)
lnprobs = np.array([i for j in probs for i in j])
return all_samples, lnprobs
# def estimate_covariance():
def estimate_covariance(nstars, fn):
"""
Return the covariance matrix of the emcee samples.
If there are more stars than the three that were used to construct this
matrix, repeat the last five columns and rows nstar times.
"""
with h5py.File(fn, "r") as f:
samples = f["action_samples"][...]
cov = np.cov(samples, rowvar=False)
return cov
# print(np.shape(cov))
# n = (np.shape(cov)[0] - 3)/5
# print(n)
# nadd = nstars - n
# print(nadd)
# star_cov_column = cov[:, -5:]
# print(np.shape(star_cov_column))
# for i in range(nadd):
# newcov = np.vstack((cov, star_cov_column))
# print(np.shape(newcov))
# newcov = np.hstack((cov, star_cov_column.T))
# print(np.shape(newcov))
# print(np.shape(newcov))
# assert 0
# return newcov
def find_optimum():
"""
Return the median of the emcee samples.
"""
with h5py.File("emcee_posterior_samples_0525.h5", "r") as f:
samples = f["action_samples"][...]
ndim = np.shape(samples)[1]
return np.array([np.median(samples[:, i]) for i in range(ndim)])
def augment(cov, N, npar):
"""
Add the required number of parameters on to the covariance matrix.
Repeat the individual star covariances for the last star N times.
params:
------
cov: (array)
A 2d array of parameter covariances.
N: (int)
The number of stars to add on.
npar: (int)
The number of parameters per star.
"""
for i in range(N):
new_col = cov[:, -npar:] # select last npar columns.
aug_col = np.hstack((cov, new_col)) # Attach them to cov
new_row = np.hstack((cov[-npar:, :], cov[-npar:, -npar:])) # new row
cov = np.vstack((aug_col, new_row)) # attach new row to cov.
return cov
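# Shape check for augment (an illustrative note, not in the original script):
# augment(np.eye(5), N=2, npar=5) returns a (15, 15) array in which the
# trailing 5x5 block of the input is repeated for each of the two added stars.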
if __name__ == "__main__":
cov = np.vstack((np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]),
np.array([1, 2, 3, 4, 5]), np.array([1, 2, 3, 4, 5]),
np.array([1, 2, 3, 4, 5])))
augment(cov, 2, 3)
assert 0
RESULTS_DIR = "/Users/ruthangus/projects/chronometer/chronometer/MH"
# Load the data for the initial parameter array.
DATA_DIR = "/Users/ruthangus/projects/chronometer/chronometer/data"
# d = pd.read_csv(os.path.join(DATA_DIR, "data_file.csv")
d = pd.read_csv(os.path.join(DATA_DIR, "action_data.csv"))
# Generate the initial parameter array and the mods objects from the data
global_params = np.array([.7725, .601, .5189, np.log(350.)]) # a b n beta
params, mods = pars_and_mods(d, global_params)
print(np.exp(params[4:9]), "mass")
print(np.exp(params[9:14]), "age")
print(params[14:19], "feh")
print(np.exp(params[19:24]), "distance")
print(params[24:29], "Av")
# params = find_optimum()
start = time.time() # timeit
# Set nsteps and niter.
nsteps = 1000
niter = 10
N, ngyro, nglob, nind = get_n_things(mods, params)
print(N, "stars")
# Construct parameter indices for the different parameter sets.
par_inds = np.arange(len(params)) # All
age_par_inds = par_inds[nglob+N:nglob+2*N]
gyro_par_inds = np.concatenate((par_inds[:ngyro], age_par_inds))
kin_par_inds = list(age_par_inds)
kin_par_inds.insert(0, par_inds[ngyro])
par_inds_list = [gyro_par_inds, np.array(kin_par_inds)]
for i in range(N):
par_inds_list.append(par_inds[nglob+i::N]) # Iso stars.
# Create the covariance matrices.
t = estimate_covariance(N)
ts = []
for i, par_ind in enumerate(par_inds_list): # For each set of pars:
ti = np.zeros((len(par_ind), len(par_ind))) # Make array of that shape
for j, ind in enumerate(par_ind): # For each index
ti[j] = t[ind][par_ind] # Fill array with those covariances.
ts.append(ti)
print(ts[-1])
print(ts[-2])
input("enter")
# Sample posteriors using MH gibbs
args = [mods, d.prot.values, d.prot_err.values, d.bv.values,
d.bv_err.values, d.Jz.values, d.Jz_err.values, par_inds_list]
flat, lnprobs = gibbs_control(params, lnprob, nsteps, niter, ts,
par_inds_list, args)
# Throw away _number_ Gibbs iterations as burn in. FIXME
number = 2
burnin = nsteps * number
flat = flat[burnin:, :]
end = time.time()
print("Time taken = ", (end - start)/60, "minutes")
print("Plotting results and traces")
plt.clf()
for j in range(int(len(lnprobs)/nsteps - 1)):
x = np.arange(j*nsteps, (j+1)*nsteps)
plt.plot(x, lnprobs[j*nsteps: (j+1)*nsteps])
plt.xlabel("Time")
plt.ylabel("ln (probability)")
plt.savefig(os.path.join(RESULTS_DIR, "prob_trace"))
labels = ["$a$", "$b$", "$n$", "$\\beta$",
"$\ln(Mass_1)$", "$\ln(Mass_2)$", "$\ln(Mass_3)$",
"$\ln(Mass_4)$", "$\ln(Mass_5)$",
"$\ln(Age_{1,i})$", "$\ln(Age_{2,i})$",
"$\ln(Age_{3,i})$", "$\ln(Age_{4,i})$",
"$\ln(Age_{5,i})$",
"$[Fe/H]_1$", "$[Fe/H]_2$", "$[Fe/H]_3$",
"$[Fe/H]_4$", "$[Fe/H]_5$",
"$\ln(D_1)$", "$\ln(D_2)$", "$\ln(D_3)$",
"$\ln(D_4)$", "$\ln(D_5)$",
"$A_{v1}$", "$A_{v2}$", "$A_{v3}$",
"$A_{v4}$", "$A_{v5}$",
"$\ln(Age_{1,g})$", "$\ln(Age_{2,g})$",
"$\ln(Age_{3,g})$", "$\ln(Age_{4,g})$",
"$\ln(Age_{5,g})$",
"$\ln(Age_{1,k})$", "$\ln(Age_{2,k})$",
"$\ln(Age_{3,k})$", "$\ln(Age_{4,k})$",
"$\ln(Age_{5,k})$"]
ages = np.zeros((np.shape(flat)[0]*2, N))
for i in range(N):
ages[:, i] = np.concatenate((flat[:, 3+N+i], flat[:, 3+5*N+i]))
plt.clf()
plt.plot(ages[:, i])
plt.ylabel("age {}".format(i))
plt.savefig(os.path.join(RESULTS_DIR, "age_{}_chain".format(i)))
# Plot chains
ndim = len(params)
for i in range(ndim):
plt.clf()
for j in range(niter - number):
x = np.arange(j*nsteps, (j+1)*nsteps)
plt.plot(x, flat[j*nsteps: (j+1)*nsteps, i].T)
plt.ylabel(labels[i])
print("Making corner plot")
truths = [.7725, .601, .5189, np.log(350.),
np.log(1), None, None, None, None,
np.log(4.56), np.log(2.5), np.log(2.5), None, None,
0., None, None, None, None,
np.log(10), np.log(2400), np.log(2400), None, None,
0., None, None, None, None,
np.log(4.56), np.log(2.5), np.log(2.5), None, None,
np.log(4.56), np.log(2.5), np.log(2.5), None, None]
fig = corner.corner(flat, truths=truths, labels=labels)
fig.savefig(os.path.join(RESULTS_DIR, "full_corner_gibbs"))
f = h5py.File(os.path.join(RESULTS_DIR, "samples.h5"), "w")
data = f.create_dataset("samples", np.shape(flat))
data[:, :] = flat
f.close()
| mit |
Kleptobismol/scikit-bio | doc/sphinxext/numpydoc/numpydoc/tests/test_docscrape.py | 39 | 18326 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
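def _numpydoc_usage_sketch():
    # Illustrative sketch, not one of the original tests: NumpyDocString
    # behaves like a mapping from section names to parsed content.  The
    # docstring below is made up for demonstration purposes only.
    doc = NumpyDocString("""
    my_func(a, b)
    Sum two operands.
    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.
    """)
    assert len(doc['Parameters']) == 2
    name, _, _ = doc['Parameters'][0]
    assert name == 'a'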
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| bsd-3-clause |
iABC2XYZ/abc | Epics/DataAna11.3.py | 1 | 8055 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 15:44:34 2017
@author: p
"""
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
plt.close('all')
def GenWeight(shape):
initial = tf.truncated_normal(shape, stddev=1.)
return tf.Variable(initial)
def GenBias(shape):
initial = tf.constant(1., shape=shape)
return tf.Variable(initial)
def getDataRow(exData,sizeRow,numMem=1):
numEx=np.shape(exData)[0]
idChoose1=np.random.randint(0,high=numEx-numMem,size=(sizeRow))
idChoose2=idChoose1+numMem
yCHV1=np.reshape(exData[idChoose1,0:14],(sizeRow,14))
xBPM1=np.reshape(exData[idChoose1,14:24],(sizeRow,10))
yCHV2=np.reshape(exData[idChoose2,0:14],(sizeRow,14))
xBPM2=np.reshape(exData[idChoose2,14:24],(sizeRow,10))
    # x: current corrector currents yCHV1 [14], current beam positions xBPM1 [10],
    #    and target beam positions xBPM2 [10]
    # y: corrector currents that produce the target positions, yCHV2 [14]
X=np.hstack((xBPM1,xBPM2,yCHV1))
Y=yCHV2
return X,Y
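def _getDataRowShapeSketch():
    # Illustrative sketch, not part of the original script.  The layout of
    # exData assumed here is inferred from getDataRow: columns 0:14 hold the
    # corrector currents (CHV) and columns 14:24 hold the BPM readings.
    fakeData = np.random.rand(50, 24)
    X, Y = getDataRow(fakeData, 8)
    # X stacks [xBPM1 (10), xBPM2 (10), yCHV1 (14)] -> (8, 34); Y is (8, 14).
    print np.shape(X), np.shape(Y)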
def conv1d(x, W):
return tf.nn.conv1d(x, W, stride=1, padding="SAME")
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding="SAME")
nameFolder='/home/e/ABC/abc/Epics/'
exData=np.loadtxt(nameFolder+'Rec.dat')
testData=np.loadtxt(nameFolder+'testRec.dat')
bpm=tf.placeholder(tf.float32,shape=(None,34))
cHV=tf.placeholder(tf.float32,shape=(None,14))
xInput=bpm
yInput=cHV
#
w1= GenWeight([34,34])
b1=GenBias([34])
x1=tf.nn.relu(tf.matmul(xInput,w1)+b1)
#
w2= GenWeight([34,200])
b2=GenBias([200])
x2=tf.nn.relu(tf.matmul(x1,w2)+b2)
#
w3= GenWeight([200,14])
b3=GenBias([14])
x3=tf.matmul(x2,w3)+b3
#
tf.nn.dropout(x3,0.5)
##
xFinal=x3
xOutput=tf.reshape(xFinal,(-1,14))
yOutput=tf.reshape(yInput,(-1,14))
lossFn=tf.reduce_mean(tf.square(xOutput-yOutput))
trainBPM_1=tf.train.AdamOptimizer(0.05)
optBPM_1=trainBPM_1.minimize(lossFn)
trainBPM_2=tf.train.AdamOptimizer(0.01)
optBPM_2=trainBPM_2.minimize(lossFn)
trainBPM_3=tf.train.AdamOptimizer(0.005)
optBPM_3=trainBPM_3.minimize(lossFn)
trainBPM_4=tf.train.AdamOptimizer(0.001)
optBPM_4=trainBPM_4.minimize(lossFn)
iniBPM=tf.global_variables_initializer()
try:
if vars().has_key('se'):
se.close()
except:
pass
se= tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
se.run(iniBPM)
nIt=2e7
sizeRow=100
stepLossRec=50
nLossRec=np.int32(nIt/stepLossRec+1)
lossRec=np.zeros((nLossRec))
lossTestRec=np.zeros((nLossRec))
iRec=0
for i in range(np.int32(nIt)):
xBPM,yCHV=getDataRow(exData,sizeRow)
se.run(optBPM_4,feed_dict={bpm:xBPM,cHV:yCHV})
if i % stepLossRec==0:
lossRecTmp=se.run(lossFn,feed_dict={bpm:xBPM,cHV:yCHV})
lossRec[iRec]=lossRecTmp
#testBPM,testCHV=getDataRow(testData,np.shape(testData)[0])
testBPM,testCHV=getDataRow(testData,sizeRow)
lossTestRecTmp=se.run(lossFn,feed_dict={bpm:testBPM,cHV:testCHV})
lossTestRec[iRec]=lossTestRecTmp
iRec+=1
print lossRecTmp,lossTestRecTmp
plt.figure('lossRec')
numPlot=30
plt.clf()
plt.subplot(1,2,1)
if iRec<=numPlot:
xPlot=np.linspace(0,iRec-1,iRec)
yPlot=lossRec[0:iRec:]
yPlotMean=np.cumsum(yPlot)/(xPlot+1)
else:
xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlot=lossRec[iRec-numPlot:iRec:]
yPlotMean[0:-1:]=yPlotMean[1::]
yPlotMean[-1]=np.mean(yPlot)
plt.hold
plt.plot(xPlot,yPlot,'*b')
plt.plot(xPlot,yPlotMean,'go')
plt.grid('on')
plt.title('Train '+str(i))
#
plt.subplot(1,2,2)
if iRec<=numPlot:
xPlotT=np.linspace(0,iRec-1,iRec)
yPlotT=lossTestRec[0:iRec:]
yPlotMeanT=np.cumsum(yPlotT)/(xPlotT+1)
else:
xPlotT=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlotT=lossTestRec[iRec-numPlot:iRec:]
yPlotMeanT[0:-1:]=yPlotMeanT[1::]
yPlotMeanT[-1]=np.mean(yPlotT)
plt.hold
plt.plot(xPlotT,yPlotT,'*b')
plt.plot(xPlotT,yPlotMeanT,'go')
plt.grid('on')
plt.title('Test '+str(i))
plt.pause(0.05)
xBPM,yCHV=getDataRow(exData,1)
yCHV_Cal=se.run(xFinal,feed_dict={bpm:xBPM})
testBPM,testCHV=getDataRow(testData,1)
testCHV_Cal=se.run(xFinal,feed_dict={bpm:testBPM})
plt.figure('EX')
plt.clf()
plt.subplot(121)
plt.hold
plt.plot(np.reshape(yCHV[0,:],(14)),'bd')
plt.plot(yCHV_Cal[0,:],'rd')
plt.title(i)
plt.subplot(122)
plt.hold
plt.plot(np.reshape(testCHV[0,:],(14)),'bd')
plt.plot(testCHV_Cal[0,:],'rd')
plt.title(i)
plt.pause(0.05)
###################### FINAL PLOT -------------
plotFolder='./11.3/'
import os
if not os.path.exists(plotFolder):
os.makedirs(plotFolder)
else:
plotFolder=plotFolder[0:-1]+'Temp/'
os.makedirs(plotFolder)
plt.close('all')
# Train Plot
xBPM,yCHV=getDataRow(exData,sizeRow)
nameFig=plotFolder+'Loss Train RecSave'
fig=plt.figure(nameFig)
numPlot=30
plt.clf()
if iRec<=numPlot:
xPlot=np.linspace(0,iRec-1,iRec)
yPlot=lossRec[0:iRec:]
yPlotMean=np.cumsum(yPlot)/(xPlot+1)
else:
xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlot=lossRec[iRec-numPlot:iRec:]
yPlotMean[0:-1:]=yPlotMean[1::]
yPlotMean[-1]=np.mean(yPlot)
plt.hold
plt.plot(xPlot,yPlot,'*b')
plt.plot(xPlot,yPlotMean,'go')
plt.grid('on')
plt.title(nameFig)
nameFig+='.png'
fig.savefig(nameFig)
xBPM,yCHV=getDataRow(exData,1)
yCHV_Cal=se.run(xFinal,feed_dict={bpm:xBPM})
nameFig=plotFolder+'train Ex. Save'
plt.figure(nameFig)
plt.clf()
plt.hold
plt.plot(np.reshape(yCHV[0,:],(14)),'bd')
plt.plot(yCHV_Cal[0,:],'rd')
plt.title(nameFig)
nameFig+='.png'
plt.savefig(nameFig)
# Test Plot
plt.close('all')
testBPM,testCHV=getDataRow(testData,np.shape(testData)[0])
nameFig=plotFolder+'Loss Test RecSave'
fig=plt.figure(nameFig)
numPlot=30
plt.clf()
if iRec<=numPlot:
xPlot=np.linspace(0,iRec-1,iRec)
yPlot=lossTestRec[0:iRec:]
yPlotMean=np.cumsum(yPlot)/(xPlot+1)
else:
xPlot=np.linspace(iRec-numPlot,iRec-1,numPlot)
yPlot=lossTestRec[iRec-numPlot:iRec:]
yPlotMean[0:-1:]=yPlotMean[1::]
yPlotMean[-1]=np.mean(yPlot)
plt.hold
plt.plot(xPlot,yPlot,'*b')
plt.plot(xPlot,yPlotMean,'go')
plt.grid('on')
plt.title(nameFig)
nameFig+='.png'
fig.savefig(nameFig)
xBPMT,yCHVT=getDataRow(exData,1)
yCHVT_Cal=se.run(xFinal,feed_dict={bpm:xBPMT})
nameFig=plotFolder+'Test Ex. Save'
plt.figure(nameFig)
plt.clf()
plt.hold
plt.plot(np.reshape(yCHVT[0,:],(14)),'bd')
plt.plot(yCHVT_Cal[0,:],'rd')
plt.title(nameFig)
nameFig+='.png'
plt.savefig(nameFig)
##
testBPM,testCHV=getDataRow(testData,np.shape(testData)[0])
testCHVCal=np.reshape(se.run(xFinal,feed_dict={bpm:testBPM}),np.shape(testCHV))
for i in range(7):
for j in range(2):
if j==0:
nameFigure=plotFolder+'Test x: '+str(i+1)
else:
nameFigure=plotFolder+'Test y: '+str(i+1)
plt.figure(nameFigure)
plt.clf()
plt.hold
plt.plot(testCHV[:,i+j*7],'r.')
plt.plot(testCHVCal[:,i+j*7],'b.')
plt.title(nameFigure)
nameFig=nameFigure+'.png'
plt.savefig(nameFig)
for i in range(7):
for j in range(2):
if j==0:
nameFigure=plotFolder+'Ratio Test x: '+str(i+1)
else:
nameFigure=plotFolder+'Ratio Test y: '+str(i+1)
plt.figure(nameFigure)
plt.clf()
plt.hold
plt.plot((testCHV[:,i+j*7]-testCHVCal[:,i+j*7])/testCHV[:,i+j*7],'r.')
nameFig=nameFigure+'.png'
plt.savefig(nameFig)
plt.close('all')
| gpl-3.0 |
ai-se/XTREE | src/tools/oracle.py | 1 | 6506 | from __future__ import division
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from collections import Counter
from scipy.spatial.distance import euclidean
from random import choice, seed as rseed, uniform as rand
import pandas as pd
import numpy as np
from texttable import Texttable
from stats import abcd
from misc import *
from pdb import set_trace
from sklearn.neighbors import NearestNeighbors, BallTree, KDTree
from sklearn.svm import SVC, SVR
import warnings
from time import time
warnings.filterwarnings('ignore')
def SMOTE(data=None, atleast=50, atmost=100, a=None,b=None, k=5, resample=False):
"Synthetic Minority Oversampling Technique"
# set_trace()
def knn(a,b):
"k nearest neighbors"
b=np.array([bb[:-1] for bb in b])
tree = BallTree(b)
__, indx = tree.query(a[:-1], k=6)
return [b[i] for i in indx]
# set_trace()
# return sorted(b, key=lambda F: euclidean(a[:-1], F[:-1]))
def kfn(me,my_lot,others):
"k farthest neighbors"
    # Rank the candidate rows by distance from 'me', farthest first.
    return sorted(others, key=lambda F: euclidean(me[:-1], F[:-1]), reverse=True)
def extrapolate(one, two):
# t=time()
new = len(one)*[None]
new[:-1] = [a + rand(0,1) * (b-a) for
a, b in zip(one[:-1], two[:-1])]
new[-1] = int(one[-1])
return new
def populate(data, atleast):
t=time()
newData = [dd.tolist() for dd in data]
if atleast-len(newData)<0:
try:
return [choice(newData) for _ in xrange(atleast)]
except:
set_trace()
else:
for _ in xrange(atleast-len(newData)):
one = choice(data)
neigh = knn(one, data)[1:k + 1]
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
return newData
def populate2(data1, data2):
newData = []
for _ in xrange(atleast):
for one in data1:
neigh = kfn(one, data)[1:k + 1]
try:
two = choice(neigh)
except IndexError:
two = one
newData.append(extrapolate(one, two))
return [choice(newData) for _ in xrange(atleast)]
def depopulate(data):
# if resample:
# newer = []
# for _ in xrange(atmost):
# orig = choice(data)
# newer.append(extrapolate(orig, knn(orig, data)[1]))
# return newer
# else:
return [choice(data).tolist() for _ in xrange(atmost)]
newCells = []
# rseed(1)
klass = lambda df: df[df.columns[-1]]
count = Counter(klass(data))
# set_trace()
atleast=50# if a==None else int(a*max([count[k] for k in count.keys()]))
atmost=100# if b==None else int(b*max([count[k] for k in count.keys()]))
major, minor = count.keys()
# set_trace()
for u in count.keys():
if u==minor:
newCells.extend(populate([r for r in data.as_matrix() if r[-1] == u], atleast=atleast))
if u==major:
newCells.extend(depopulate([r for r in data.as_matrix() if r[-1] == u]))
else:
newCells.extend([r.tolist() for r in data.as_matrix() if r[-1] == u])
# set_trace()
return pd.DataFrame(newCells, columns=data.columns)
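def _smote_interpolation_sketch():
  "Illustrative sketch, not part of the original module."
  # SMOTE builds a synthetic row between a minority-class row and one of its
  # neighbours: new = a + rand(0,1) * (b - a), keeping the class label of 'a'.
  # The two rows below are made up for demonstration only.
  a = [1.0, 10.0, 0]
  b = [3.0, 20.0, 0]
  new = [x + rand(0, 1) * (y - x) for x, y in zip(a[:-1], b[:-1])] + [a[-1]]
  return new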
def _smote():
"Test SMOTE"
dir = '../Data/Jureczko/camel/camel-1.6.csv'
Tbl = csv2DF([dir], as_mtx=False)
newTbl = SMOTE(Tbl)
print('Before SMOTE: ', Counter(Tbl[Tbl.columns[-1]]))
print('After SMOTE: ', Counter(newTbl[newTbl.columns[-1]]))
# ---- ::DEBUG:: -----
set_trace()
def rforest(train, test, tunings=None, smoteit=True, bin=True, smoteTune=True,regress=False):
"RF "
if tunings and smoteTune==False:
a=b=None
elif tunings and smoteTune==True:
a=tunings[-2]
b=tunings[-1]
if not isinstance(train, pd.core.frame.DataFrame):
train = csv2DF(train, as_mtx=False, toBin=bin)
if not isinstance(test, pd.core.frame.DataFrame):
test = csv2DF(test, as_mtx=False, toBin=True)
if smoteit:
if not tunings:
train = SMOTE(train, resample=True)
else:
train = SMOTE(train, a, b, resample=True)
# except: set_trace()
if not tunings:
if regress:
clf = RandomForestRegressor(n_estimators=100, random_state=1, warm_start=True,n_jobs=-1)
else:
clf = RandomForestClassifier(n_estimators=100, random_state=1, warm_start=True,n_jobs=-1)
else:
if regress:
clf = RandomForestRegressor(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3]),
warm_start=True,n_jobs=-1)
else:
clf = RandomForestClassifier(n_estimators=int(tunings[0]),
max_features=tunings[1] / 100,
min_samples_leaf=int(tunings[2]),
min_samples_split=int(tunings[3]),
warm_start=True,n_jobs=-1)
features = train.columns[:-1]
klass = train[train.columns[-1]]
clf.fit(train[features], klass)
actual = test[test.columns[-1]].as_matrix()
try: preds = clf.predict(test[test.columns[:-1]])
except: set_trace()
return actual, preds
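def _rforest_tunings_sketch():
  "Illustrative sketch, not part of the original module."
  # Assumed layout of the `tunings` list, inferred from how rforest unpacks it:
  # [n_estimators, max_features (percent), min_samples_leaf,
  #  min_samples_split, smote_a, smote_b].  The numbers below are arbitrary.
  tunings = [120, 60, 2, 4, 0.5, 1.5]
  return RandomForestClassifier(n_estimators=int(tunings[0]),
                                max_features=tunings[1] / 100,
                                min_samples_leaf=int(tunings[2]),
                                min_samples_split=int(tunings[3]),
                                n_jobs=-1)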
def SVM(train, test, tunings=None, smoteit=True, bin=True, regress=False):
"SVM "
if not isinstance(train, pd.core.frame.DataFrame):
train = csv2DF(train, as_mtx=False, toBin=bin)
if not isinstance(test, pd.core.frame.DataFrame):
test = csv2DF(test, as_mtx=False, toBin=True)
if smoteit:
train = SMOTE(train, resample=True)
# except: set_trace()
if not tunings:
if regress:
clf = SVR()
else:
clf = SVC()
else:
if regress:
clf = SVR()
else:
clf = SVC()
features = train.columns[:-1]
klass = train[train.columns[-1]]
# set_trace()
clf.fit(train[features], klass)
actual = test[test.columns[-1]].as_matrix()
try: preds = clf.predict(test[test.columns[:-1]])
except: set_trace()
return actual, preds
def _RF():
dir = '../Data/Jureczko/'
train, test = explore(dir)
print('Dataset, Expt(F-Score)')
for tr,te in zip(train, test):
say(tr[0].split('/')[-1][:-8])
actual, predicted = rforest(tr, te)
abcd = ABCD(before=actual, after=predicted)
F = np.array([k.stats()[-2] for k in abcd()])
tC = Counter(actual)
FreqClass=[tC[kk]/len(actual) for kk in list(set(actual))]
ExptF = np.sum(F*FreqClass)
say(', %0.2f\n' % (ExptF))
# ---- ::DEBUG:: -----
set_trace()
if __name__ == '__main__':
_RF()
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/streamplot.py | 10 | 20629 | """
Streamline plotting for 2D vector fields.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.lines as mlines
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None, zorder=None, start_points=None):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
        the number of columns should match the length of x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
is divided into a 30x30 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
        Scaling factor for the arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
*start_points*: Nx2 array
Coordinates of starting points for the streamlines.
In data coordinates, the same as the ``x`` and ``y`` arrays.
*zorder* : int
        Drawing order of the streamlines and arrows; artists with lower
        zorder values are drawn first. Any number is accepted.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
if zorder is None:
zorder = mlines.Line2D.zorder
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = axes._get_lines.get_next_color()
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
if color.shape != grid.shape:
msg = "If 'color' is given, must have the shape of 'Grid(x,y)'"
raise ValueError(msg)
line_colors = []
color = np.ma.masked_invalid(color)
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
if linewidth.shape != grid.shape:
msg = "If 'linewidth' is given, must have the shape of 'Grid(x,y)'"
raise ValueError(msg)
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
line_kw['zorder'] = zorder
arrow_kw['zorder'] = zorder
## Sanity checks.
if (u.shape != grid.shape) or (v.shape != grid.shape):
msg = "'u' and 'v' must be of shape 'Grid(x,y)'"
raise ValueError(msg)
u = np.ma.masked_invalid(u)
v = np.ma.masked_invalid(v)
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
if start_points is None:
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
else:
# Convert start_points from data to array coords
# Shift the seed points from the bottom left of the data so that
# data2grid works properly.
sp2 = np.asanyarray(start_points, dtype=np.float).copy()
sp2[:, 0] += np.abs(x[0])
sp2[:, 1] += np.abs(y[0])
for xs, ys in sp2:
xg, yg = dmap.data2grid(xs, ys)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = np.array(t[1]) * grid.dy + grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
# Add arrows half way along each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1] / 2.)
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.append(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
p = patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, **arrow_kw)
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(
streamlines, transform=transform, **line_kw)
lc.sticky_edges.x[:] = [grid.x_origin, grid.x_origin + grid.width]
lc.sticky_edges.y[:] = [grid.y_origin, grid.y_origin + grid.height]
if use_multicolor_lines:
lc.set_array(np.ma.hstack(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.autoscale_view()
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
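def _streamplot_usage_sketch(axes):
    # Illustrative sketch, not part of the original module: a minimal call to
    # streamplot on an evenly spaced grid.  Shapes follow the docstring above:
    # u and v have shape (len(y), len(x)).  The flow field is made up.
    x = np.linspace(-3, 3, 100)
    y = np.linspace(-3, 3, 100)
    X, Y = np.meshgrid(x, y)
    u = -1 - X**2 + Y
    v = 1 + X - Y**2
    return streamplot(axes, x, y, u, v, density=[0.5, 1], color=u,
                      linewidth=2 * np.hypot(u, v) / np.hypot(u, v).max())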
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
# ========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
# Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = float(mask.nx - 1) / grid.nx
self.y_grid2mask = float(mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = grid.nx / grid.width
self.y_data2grid = grid.ny / grid.height
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return (int((xi * self.x_grid2mask) + 0.5),
int((yi * self.y_grid2mask) + 0.5))
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
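def _domainmap_scaling_sketch():
    # Illustrative numeric sketch, not part of the original module: the
    # grid-to-mask conversion used by DomainMap.grid2mask for a 100x100 grid
    # and the default 30x30 mask (density=1).  The numbers are assumptions.
    grid_nx, mask_nx = 100, 30
    x_grid2mask = float(mask_nx - 1) / grid_nx
    xg = 50.0
    xm = int((xg * x_grid2mask) + 0.5)  # nearest mask column -> 15
    return xm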
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if x.ndim == 1:
pass
elif x.ndim == 2:
x_row = x[0, :]
if not np.allclose(x_row, x):
raise ValueError("The rows of 'x' must be equal")
x = x_row
else:
raise ValueError("'x' can have at maximum 2 dimensions")
if y.ndim == 1:
pass
elif y.ndim == 2:
y_col = y[:, 0]
if not np.allclose(y_col, y.T):
raise ValueError("The columns of 'y' must be equal")
y = y_col
else:
raise ValueError("'y' can have at maximum 2 dimensions")
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
if density <= 0:
raise ValueError("If a scalar, 'density' must be positive")
self.nx = self.ny = int(30 * density)
else:
if len(density) != 2:
raise ValueError("'density' can have at maximum 2 dimensions")
self.nx = int(30 * density[0])
self.ny = int(30 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
raise InvalidIndexError
class InvalidIndexError(Exception):
pass
class TerminateTrajectory(Exception):
pass
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
dmap.start_trajectory(x0, y0)
sf, xf_traj, yf_traj = _integrate_rk12(x0, y0, dmap, forward_time)
dmap.reset_start_point(x0, y0)
sb, xb_traj, yb_traj = _integrate_rk12(x0, y0, dmap, backward_time)
# combine forward and backward trajectories
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if stotal > minlength:
return x_traj, y_traj
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
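def _trajectory_splice_sketch():
    # Illustrative sketch, not part of the original module: how integrate()
    # splices the backward and forward trajectories without repeating the
    # shared starting point.  The coordinates are made up.
    xb_traj = [5.0, 4.0, 3.0]   # backward leg, starting point first
    xf_traj = [5.0, 6.0, 7.0]   # forward leg, starting point first
    x_traj = xb_traj[::-1] + xf_traj[1:]
    return x_traj               # [3.0, 4.0, 5.0, 6.0, 7.0]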
def _integrate_rk12(x0, y0, dmap, f):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
# This error is below that needed to match the RK4 integrator. It
# is set for visual reasons -- too low and corners start
# appearing ugly and jagged. Can be tuned.
maxerror = 0.003
# This limit is important (for all integrators) to avoid the
# trajectory skipping some mask cells. We could relax this
# condition if we use the code which is commented out below to
# increment the location gradually. However, due to the efficient
# nature of the interpolation, this doesn't boost speed by much
# for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi)
except InvalidIndexError:
break
if (stotal + ds) > 2:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj
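def _rk12_scalar_sketch():
    # Illustrative sketch, not part of the original module: the same
    # Runge-Kutta 1(2) / Heun scheme with adaptive step size used by
    # _integrate_rk12, applied to the scalar ODE dy/dt = -y.  The tolerance
    # and step limits are assumptions chosen for demonstration.
    maxerror, maxds = 1e-4, 0.1
    ds, t, y = maxds, 0.0, 1.0
    while t < 2.0:
        k1 = -y
        k2 = -(y + ds * k1)
        dy1 = ds * k1                # first-order (Euler) increment
        dy2 = ds * 0.5 * (k1 + k2)   # second-order (Heun) increment
        error = abs(dy2 - dy1)
        if error < maxerror:         # accept the step
            y += dy2
            t += ds
        # grow or shrink the step based on the estimated local error
        ds = min(maxds, 0.85 * ds * (maxerror / max(error, 1e-16)) ** 0.5)
    return t, y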
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
# ========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(np.int)
y = yi.astype(np.int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = np.int(xi)
y = np.int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
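def _interpgrid_sketch():
    # Illustrative worked example, not part of the original module: bilinear
    # interpolation at the centre of the first cell of a small 3x3 grid.
    a = np.array([[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]])
    # x = y = 0 and xt = yt = 0.5, so the result averages a[0,0], a[0,1],
    # a[1,0] and a[1,1]: 0.25 * (0 + 1 + 3 + 4) = 2.0
    return interpgrid(a, 0.5, 0.5)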
def _gen_starting_points(shape):
"""Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
i = 0
direction = 'right'
for i in xrange(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
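def _gen_starting_points_sketch():
    # Illustrative sketch, not part of the original module: the boundary-first
    # spiral of starting points produced for a small 3x3 mask.
    pts = list(_gen_starting_points((3, 3)))
    # pts == [(0, 0), (1, 0), (2, 0), (2, 1), (2, 2),
    #         (1, 2), (0, 2), (0, 1), (1, 1)]
    return pts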
| gpl-3.0 |
LEX2016WoKaGru/pyClamster | scripts/session/FE3_session_600.py | 1 | 1705 | #!/usr/bin/env python3
import pyclamster
import logging
import pickle
import os,sys
import matplotlib.pyplot as plt
# set up logging
logging.basicConfig(level=logging.DEBUG)
sessionfile = "data/sessions/FE3_session_new_600.pk"
try: # maybe there is already a session
session = pickle.load(open(sessionfile,"rb"))
except: # if not
# read calibration
calib = pickle.load(open("data/fe3/FE3_straight+projcal.pk","rb"))
# create session
session = pyclamster.CameraSession(
latitude = 54.4947,
longitude = 11.240817,
heightNN = 9.0,
zone = 32,
imgshape = (1920,1920),
smallshape = (960,960),
rectshape = (900,900),
calibration = calib
)
# add images to session
#session.add_images("/home/yann/Studium/LEX/LEX/cam/cam3/FE3*.jpg")
# create distortion map
session.createDistortionMap(max_angle=pyclamster.deg2rad(45))
    # save the session
session.reset_images()
session.save(sessionfile)
# loop over all images
#for image in session.iterate_over_rectified_images():
#filename = image._get_time_from_filename("FE3_Image_%Y%m%d_%H%M%S_UTCp1.jpg")
#image.image.save("plots/images/fe3/newcalib/{}-rect.jpg".format(filename))
# loop over all images
session.add_images("/home/yann/Studium/LEX/LEX/cam/cam3/FE3*.jpg")
for image in session.iterate_over_rectified_images():
plt.subplot(131)
plt.imshow(image.data)
plt.title("image")
plt.subplot(132)
plt.imshow(image.coordinates.elevation,cmap="Blues")
plt.title("elevation")
plt.subplot(133)
plt.imshow(image.coordinates.azimuth,cmap="Blues")
plt.title("azimuth")
plt.show()
| gpl-3.0 |
xubenben/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
ajdawson/windspharm | examples/cdms/sfvp_example.py | 1 | 2297 | """
Compute streamfunction and velocity potential from the long-term-mean
flow.
This example uses the cdms interface.
Additional requirements for this example:
* cdms2 (http://uvcdat.llnl.gov/)
* matplotlib (http://matplotlib.org/)
* cartopy (http://scitools.org.uk/cartopy/)
"""
import cartopy.crs as ccrs
import cdms2
import matplotlib as mpl
import matplotlib.pyplot as plt
from windspharm.cdms import VectorWind
from windspharm.examples import example_data_path
mpl.rcParams['mathtext.default'] = 'regular'
# Read zonal and meridional wind components from file using the cdms2 module
# from CDAT. The components are in separate files.
ncu = cdms2.open(example_data_path('uwnd_mean.nc'), 'r')
uwnd = ncu('uwnd')
ncu.close()
ncv = cdms2.open(example_data_path('vwnd_mean.nc'), 'r')
vwnd = ncv('vwnd')
ncv.close()
# Create a VectorWind instance to handle the computation of streamfunction and
# velocity potential.
w = VectorWind(uwnd, vwnd)
# Compute the streamfunction and velocity potential.
sf, vp = w.sfvp()
# Pick out the field for December and add a cyclic point (the cyclic point is
# for plotting purposes).
sf_dec = sf(time=slice(11, 12), longitude=(0, 360), squeeze=True)
vp_dec = vp(time=slice(11, 12), longitude=(0, 360), squeeze=True)
# Plot streamfunction.
ax1 = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
lons, lats = sf_dec.getLongitude()[:], sf_dec.getLatitude()[:]
clevs = [-120, -100, -80, -60, -40, -20, 0, 20, 40, 60, 80, 100, 120]
fill_sf = ax1.contourf(lons, lats, sf_dec.asma() * 1e-06, clevs,
transform=ccrs.PlateCarree(), cmap=plt.cm.RdBu_r,
extend='both')
ax1.coastlines()
ax1.gridlines()
plt.colorbar(fill_sf, orientation='horizontal')
plt.title('Streamfunction ($10^6$m$^2$s$^{-1}$)', fontsize=16)
# Plot velocity potential.
plt.figure()
ax2 = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
clevs = [-10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10]
fill_vp = ax2.contourf(lons, lats, vp_dec.asma() * 1e-06, clevs,
transform=ccrs.PlateCarree(), cmap=plt.cm.RdBu_r,
extend='both')
ax2.coastlines()
ax2.gridlines()
plt.colorbar(fill_vp, orientation='horizontal')
plt.title('Velocity Potential ($10^6$m$^2$s$^{-1}$)', fontsize=16)
plt.show()
| mit |
google/makani | analysis/aero/avl/avl_reader.py | 1 | 21745 | #!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for parsing and analyzing AVL files."""
import collections
import importlib
import logging
import sys
import warnings
import numpy
class AvlReader(object):
"""Parses and analyzes AVL files.
Attributes:
filename: Filename of the AVL file to be processed.
avl: Ordered dict that represents the parsed structure of the AVL file.
properties: Dict that represents properties of the aircraft,
surfaces, and surface sections. Its structure mimics that of
the AVL file itself.
"""
def __init__(self, filename):
"""Initializes the class by parsing the AVL file."""
self.filename = filename
with open(filename, 'r') as f:
self.avl = self.Parse(f.read())
self.properties = self.Analyze(self.avl)
def Analyze(self, avl):
"""Analyze properties of the AVL geometry.
Args:
avl: Ordered dict representing a parsed AVL file.
Returns:
Dict that represents properties of the aircraft, surfaces, and
surface sections. Its structure mimics that of the AVL file
itself.
"""
properties = dict()
properties['surfaces'] = []
for avl_surface in avl['surfaces']:
transform = self._GetSurfaceTransformation(avl_surface)
sections = []
for avl_section in avl_surface['sections']:
sections.append(self._CalcSectionProperties(avl_section, transform))
panels = []
for section1, section2 in zip(sections[0:-1], sections[1:]):
panels.append(self._CalcPanelProperties(section1, section2))
surface = self._CalcSurfaceProperties(sections, panels)
surface['name'] = avl_surface['name']
surface['sections'] = sections
surface['panels'] = panels
properties['surfaces'].append(surface)
return properties
def _CalcSectionProperties(self, avl_section, transform=lambda x: x):
"""Calculates the properties of sections, i.e. stations along the span."""
# Apply the scaling and offset parameters, if any, from the AVL
# file.
chord = avl_section['Chord'] * transform([0.0, 1.0, 0.0])[1]
leading_edge_avl = transform([avl_section['Xle'],
avl_section['Yle'],
avl_section['Zle']])
return {
'chord': chord,
'incidence': numpy.pi / 180.0 * avl_section['Ainc'],
'leading_edge_b': numpy.array([-leading_edge_avl[0],
leading_edge_avl[1],
-leading_edge_avl[2]]),
'quarter_chord_b': numpy.array([-leading_edge_avl[0] - chord / 4.0,
leading_edge_avl[1],
-leading_edge_avl[2]])
}
def _CalcPanelProperties(self, section1, section2):
"""Calculates properties of the areas between sections."""
span = numpy.sqrt(
(section2['leading_edge_b'][1] - section1['leading_edge_b'][1])**2.0 +
(section2['leading_edge_b'][2] - section1['leading_edge_b'][2])**2.0)
area = (section1['chord'] + section2['chord']) * span / 2.0
taper_ratio = section2['chord'] / section1['chord']
c = ((2.0 * section1['chord'] + section2['chord']) /
(section1['chord'] + section2['chord']) / 3.0)
mean_incidence = (c * section1['incidence'] +
(1.0 - c) * section2['incidence'])
aerodynamic_center_b = (c * section1['quarter_chord_b'] +
(1.0 - c) * section2['quarter_chord_b'])
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'mean_aerodynamic_chord': (2.0 / 3.0 * section1['chord'] *
(1.0 + taper_ratio + taper_ratio**2.0) /
(1.0 + taper_ratio)),
'mean_incidence': mean_incidence,
'taper_ratio': taper_ratio,
'span': span,
'standard_mean_chord': area / span
}
def _CalcSurfaceProperties(self, sections, panels):
"""Calculates properties of full surfaces."""
area = 0.0
aerodynamic_center_b = numpy.array([0.0, 0.0, 0.0])
mean_aerodynamic_chord = 0.0
mean_incidence = 0.0
for panel in panels:
area += panel['area']
aerodynamic_center_b += panel['area'] * panel['aerodynamic_center_b']
mean_aerodynamic_chord += panel['area'] * panel['mean_aerodynamic_chord']
mean_incidence += panel['area'] * panel['mean_incidence']
aerodynamic_center_b /= area
mean_aerodynamic_chord /= area
mean_incidence /= area
# Set the span vector from the leading edge of the first section
# to the leading edge of the last section. Ignore the x
# component. Choose the direction such that the span is along the
# surface coordinate y axis.
span_b = sections[0]['leading_edge_b'] - sections[-1]['leading_edge_b']
span_b[0] = 0.0
if abs(span_b[1]) > abs(span_b[2]):
if span_b[1] < 0.0:
span_b *= -1.0
else:
if span_b[2] < 0.0:
span_b *= -1.0
span = numpy.linalg.norm(span_b)
# Surface coordinates are defined such that they are aligned with
# body coordinates for horizontal surfaces and are rotated about
# body x such that surface z is aligned with the *negative* body y
# for vertical surfaces. The negative is required to match the
# convention in AVL.
surface_x_b = [1.0, 0.0, 0.0]
surface_y_b = span_b / span
surface_z_b = numpy.cross(surface_x_b, surface_y_b)
return {
'aerodynamic_center_b': aerodynamic_center_b,
'area': area,
'aspect_ratio': span * span / area,
'dcm_b2surface': numpy.array([surface_x_b, surface_y_b, surface_z_b]),
'mean_aerodynamic_chord': mean_aerodynamic_chord,
'mean_incidence': mean_incidence,
'span': span,
'standard_mean_chord': area / span
}
def _GetSurfaceTransformation(self, surface):
"""Returns surface scaling and offset transformation function."""
if all([k in surface for k in ['Xscale', 'Yscale', 'Zscale']]):
scale = [surface['Xscale'], surface['Yscale'], surface['Zscale']]
else:
scale = [1.0, 1.0, 1.0]
if all([k in surface for k in ['dX', 'dY', 'dZ']]):
offset = [surface['dX'], surface['dY'], surface['dZ']]
else:
offset = [0.0, 0.0, 0.0]
return lambda coord: [x * m + b for x, m, b in zip(coord, scale, offset)]
def PlotGeometry(self):
"""Plots 3-D line drawing of surfaces."""
# b/120081442: Next lines removed the module initialization load of the
# matplotlib module which was causing a bazel pip-installed package issue on
# batch sim workers.
pyplot = importlib.import_module('matplotlib.pyplot')
mplot_3d = importlib.import_module('mpl_toolkits.mplot3d')
# Importing Axes3D has the side effect of enabling 3D projections, but
# it is not directly used, so we remove it here.
del mplot_3d.Axes3D
axes = pyplot.figure().add_subplot(1, 1, 1, projection='3d')
axes.w_xaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_yaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_zaxis.set_pane_color((0.8, 0.8, 0.8, 1.0))
axes.w_xaxis.gridlines.set_color(('blue'))
axes.w_yaxis.gridlines.set_color(('blue'))
axes.w_zaxis.gridlines.set_color(('blue'))
# The _axinfo update requires additional specification of linestyle and
# linewidth on our linux distributions in order to function properly.
axes.w_xaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_yaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.w_zaxis._axinfo.update( # pylint: disable=protected-access
{'grid': {'color': (0.7, 0.7, 0.7, 1.0), 'linestyle': '-',
'linewidth': 0.8}})
axes.set_aspect('equal')
axes.set_xlabel('x')
axes.set_ylabel('y')
axes.set_zlabel('z')
half_span = self.avl['Bref'] / 2.0
axes.set_xlim((-half_span * 0.5, half_span * 1.5))
axes.set_ylim((-half_span, half_span))
axes.set_zlim((-half_span, half_span))
color_order = ['black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue',
'violet', 'gray']
legend_plots = []
legend_labels = []
for i, surface in enumerate(self.avl['surfaces']):
transform = self._GetSurfaceTransformation(surface)
leading_edge_xs = []
leading_edge_ys = []
leading_edge_zs = []
trailing_edge_xs = []
trailing_edge_ys = []
trailing_edge_zs = []
for section in surface['sections']:
coord = transform([section['Xle'], section['Yle'], section['Zle']])
leading_edge_xs.append(coord[0])
leading_edge_ys.append(coord[1])
leading_edge_zs.append(coord[2])
coord = transform([section['Xle'] + section['Chord'],
section['Yle'],
section['Zle']])
trailing_edge_xs.append(coord[0])
trailing_edge_ys.append(coord[1])
trailing_edge_zs.append(coord[2])
xs = leading_edge_xs + list(reversed(trailing_edge_xs))
ys = leading_edge_ys + list(reversed(trailing_edge_ys))
zs = leading_edge_zs + list(reversed(trailing_edge_zs))
surface_line, = axes.plot(xs + [xs[0]], ys + [ys[0]], zs + [zs[0]],
color=color_order[i])
legend_plots.append(surface_line)
legend_labels.append(surface['name'])
# Plot symmetric surfaces.
if self.avl['iYsym']:
axes.plot(xs + [xs[0]], -numpy.array(ys + [ys[0]]), zs + [zs[0]], '--',
color=color_order[i])
elif 'Ydupl' in surface:
y_scale = surface['Yscale'] if 'Yscale' in surface else 1.0
axes.plot(xs + [xs[0]],
-numpy.array(ys + [ys[0]]) + 2.0 * surface['Ydupl'] * y_scale,
zs + [zs[0]], '--',
color=color_order[i])
axes.legend(legend_plots, legend_labels, loc='lower left',
prop={'size': 10})
pyplot.show()
def Parse(self, avl_file):
"""Parses AVL file.
Args:
avl_file: String of the read AVL file.
Returns:
Dictionary representing the information stored in the AVL file.
"""
# Make iterator over lines in file. Automatically, remove comments
# and blank lines. Terminate the file with an END keyword (this
# isn't mentioned in the AVL documentation, but at least one of the
# example files uses this convention and it makes the parsing more
# natural.
lines = iter([l.split('!', 1)[0].strip()
for l in avl_file.splitlines()
if l.strip() and l[0] not in '#!'] + ['END'])
# Parse the AVL header for information on the case name, reference
# areas, etc.
avl, line = self._ParseHeader(lines)
# Loop through the rest of the file, which should only be composed
# of surfaces and bodies.
while True:
tokens = line.split()
keyword = tokens[0][0:4]
if keyword == 'SURFACE'[0:4]:
surface, line = self._ParseSurface(lines)
avl.setdefault('surfaces', []).append(surface)
elif keyword == 'BODY':
body, line = self._ParseBody(lines)
avl.setdefault('body', []).append(body)
else:
if keyword != 'END':
logging.error('Encountered unexpected keyword: %s', tokens[0])
break
return avl
def _ParseHeader(self, lines):
"""Parses header information."""
header = collections.OrderedDict()
header['case'] = lines.next()
tokens = lines.next().split()
header['Mach'] = float(tokens[0])
tokens = lines.next().split()
header['iYsym'] = int(tokens[0])
header['iZsym'] = int(tokens[1])
header['Zsym'] = float(tokens[2])
tokens = lines.next().split()
header['Sref'] = float(tokens[0])
header['Cref'] = float(tokens[1])
header['Bref'] = float(tokens[2])
tokens = lines.next().split()
header['Xref'] = float(tokens[0])
header['Yref'] = float(tokens[1])
header['Zref'] = float(tokens[2])
line = lines.next()
try:
# CDp is optional.
header['CDp'] = float(line.split()[0])
line = lines.next()
except (IndexError, ValueError):
pass
return header, line
def _ParseAirfoil(self, lines):
"""Parses airfoil camber line definition."""
airfoil = [[]]
while True:
line = lines.next()
tokens = line.split()
try:
airfoil.append([float(tokens[0]), float(tokens[1])])
except (IndexError, ValueError):
break
return airfoil, line
def _ParseFilename(self, lines):
"""Parses filename of airfoil definition."""
line = lines.next()
# The file name may either be quoted or not.
if line[0] == '"':
filename = line.split()[0][1:-1]
else:
filename = line
return filename
def _ParseSection(self, lines):
"""Parses information describing cross-section of surface along span."""
section = collections.OrderedDict()
tokens = lines.next().split()
section['Xle'] = float(tokens[0])
section['Yle'] = float(tokens[1])
section['Zle'] = float(tokens[2])
section['Chord'] = float(tokens[3])
section['Ainc'] = float(tokens[4])
try:
# Nspan and Sspace are optional.
section['Nspan'] = int(tokens[5])
section['Sspace'] = float(tokens[6])
except (IndexError, ValueError):
pass
next_line = None
first_keyword = True
while True:
line = next_line if next_line else lines.next()
next_line = None
tokens = line.split()
keyword = tokens[0][0:4]
# Issue warnings if there is a suspicious ordering of the camber
# line keywords. According to the AVL documentation, the camber
# line keywords must immediately follow the data line of the
# SECTION keyword, and also later camber line keywords overwrite
# earlier ones.
if keyword in ['NACA', 'AIRFOIL'[0:4], 'AFILE'[0:4]]:
if not first_keyword:
logging.warning('%s did not immediately follow the data line of the '
'SECTION keyword.', tokens[0])
if any([k in section for k in ['naca', 'airfoil', 'afile']]):
logging.warning('Another camber line definition exists. This will '
'overwrite it.')
if keyword == 'NACA':
# Parse NACA camber line.
section['naca'] = int(lines.next().split()[0])
assert 0 <= section['naca'] and section['naca'] <= 9999
elif keyword == 'AIRFOIL'[0:4]:
# Parse airfoil coordinates.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['airfoil'], next_line = self._ParseAirfoil(lines)
elif keyword == 'AFILE'[0:4]:
# Parse airfoil filename.
try:
# x/c range is optional.
section['x1'] = float(tokens[1])
section['x2'] = float(tokens[2])
except (IndexError, ValueError):
pass
section['afile'] = self._ParseFilename(lines)
elif keyword == 'DESIGN'[0:4]:
# Parse design variable.
tokens = lines.next().split()
design = collections.OrderedDict()
design['DName'] = tokens[0]
try:
design['Wdes'] = float(tokens[1])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for Wdes.
logging.warning('Wdes value is missing for %s.', design['DName'])
section.setdefault('designs', []).append(design)
elif keyword == 'CONTROL'[0:4]:
# Parse control variable.
tokens = lines.next().split()
control = collections.OrderedDict()
control['name'] = tokens[0]
control['gain'] = float(tokens[1])
control['Xhinge'] = float(tokens[2])
control['XYZhvec'] = [float(tokens[3]),
float(tokens[4]),
float(tokens[5])]
try:
control['SgnDup'] = float(tokens[6])
except (IndexError, ValueError):
# Although it is not listed as an optional value in the AVL
# documentation, some of the example AVL files do not have a
# value for SgnDup.
logging.warning('SgnDup value is missing for %s.', control['name'])
section.setdefault('controls', []).append(control)
elif keyword == 'CLAF':
# Parse dCL/da scaling factor.
section['CLaf'] = float(lines.next().split()[0])
elif keyword == 'CDCL':
# Parse CD(CL) function parameters.
tokens = lines.next().split()
section['CL1'] = float(tokens[0])
section['CD1'] = float(tokens[1])
section['CL2'] = float(tokens[2])
section['CD2'] = float(tokens[3])
section['CL3'] = float(tokens[4])
section['CD3'] = float(tokens[5])
else:
break
first_keyword = False
return section, line
def _ParseSurface(self, lines):
"""Parses definition of a lifting surface."""
surface = collections.OrderedDict()
surface['name'] = lines.next()
tokens = lines.next().split()
surface['Nchord'] = int(tokens[0])
surface['Cspace'] = float(tokens[1])
try:
# Nspan and Sspace are optional.
surface['Nspan'] = int(tokens[2])
surface['Sspace'] = float(tokens[3])
except (IndexError, ValueError):
pass
next_line = None
while True:
line = next_line if next_line else lines.next()
next_line = None
keyword = line.split()[0][0:4]
if keyword in ['COMPONENT'[0:4], 'INDEX'[0:4]]:
# Parse component grouping.
surface['Lcomp'] = int(lines.next().split()[0])
elif keyword == 'YDUPLICATE'[0:4]:
# Parse duplicated surface y-plane.
surface['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse surface scaling.
tokens = lines.next().split()
surface['Xscale'] = float(tokens[0])
surface['Yscale'] = float(tokens[1])
surface['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse surface translation.
tokens = lines.next().split()
surface['dX'] = float(tokens[0])
surface['dY'] = float(tokens[1])
surface['dZ'] = float(tokens[2])
elif keyword == 'ANGLE'[0:4]:
# Parse surface incidence angle.
surface['dAinc'] = float(lines.next().split()[0])
elif keyword == 'NOWAKE'[0:4]:
surface['nowake'] = True
elif keyword == 'NOALBE'[0:4]:
surface['noalbe'] = True
elif keyword == 'NOLOAD'[0:4]:
surface['noload'] = True
elif keyword == 'SECTION'[0:4]:
# Parse airfoil section camber line along span.
section, next_line = self._ParseSection(lines)
surface.setdefault('sections', []).append(section)
else:
break
return surface, line
def _ParseBody(self, lines):
"""Parses description of non-lifting bodies shape."""
body = collections.OrderedDict()
body['name'] = lines.next()
tokens = lines.next().split()
body['Nbody'] = int(tokens[0])
body['Bspace'] = float(tokens[1])
while True:
line = lines.next()
keyword = line.split()[0][0:4]
if keyword == 'YDUPLICATE'[0:4]:
body['Ydupl'] = float(lines.next().split()[0])
elif keyword == 'SCALE'[0:4]:
# Parse body scaling.
tokens = lines.next().split()
body['Xscale'] = float(tokens[0])
body['Yscale'] = float(tokens[1])
body['Zscale'] = float(tokens[2])
elif keyword == 'TRANSLATE'[0:4]:
# Parse body translation.
tokens = lines.next().split()
body['dX'] = float(tokens[0])
body['dY'] = float(tokens[1])
body['dZ'] = float(tokens[2])
elif keyword == 'BFILE'[0:4]:
# Parse body shape filename.
body['bfile'] = self._ParseFilename(lines)
else:
break
return body, line
def main(argv):
# Internal matplotlib functions currently trigger the following
# warnings.
warnings.filterwarnings('ignore', 'elementwise comparison failed; returning '
'scalar instead, but in the future will perform '
'elementwise comparison')
warnings.filterwarnings('ignore', 'comparison to `None` will result in an '
'elementwise object comparison in the future.')
logging.basicConfig(stream=sys.stdout,
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO)
avl = AvlReader(argv[1])
avl.PlotGeometry()
logging.shutdown()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 |
mfjb/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
Anton04/SolarDataRESTfulAPI | SolarDataRESTapi.py | 1 | 16625 | #!/bin/python
from flask import Flask, jsonify, abort,request,Response
import InfluxDBInterface
import json
import IoTtoolkit
#from elasticsearch import Elasticsearch
from elasticsearch import NotFoundError
from ElasticsearchInterface import ESinterface
import os, sys
import time
import pandas as pd
app = Flask(__name__)
app.config.update(dict(
# DATABASE=os.path.join(app.root_path, 'flaskr.db'),
# SERVER_NAME = "livinglab2.powerprojects.se:8080"
# SERVER_NAME = "localhost:8088"
# DEBUG=True
# SECRET_KEY='development key',
# USERNAME='admin',
# PASSWORD='default'
))
def getGeographyData(keys):
path = "/var/www/html/geography/" + "/".join(keys)
print path
if os.path.isfile(path + ".geojson"):
file = open(path + ".geojson","r")
data = json.load(file)
file.close()
return [data]
if not os.path.exists(path):
abort(404)
features =[]
crs = {}
jsontype = {}
for filename in os.listdir(path):
if filename.endswith(".geojson"):
file = open(path + "/" + filename,"r")
data = json.load(file)
file.close()
features += data["features"]
crs = data["crs"]
jsontype = data["type"]
return {"features":features,"crs":crs,"type":jsontype}
def get_parts(path_url):
#Remove trailing slash
if path_url[-1] == "/":
path_url = path_url[:-1]
parts = path_url.split("/")
return parts
#Reformat a influx db time period to pythons sec float.
#Other options for how to specify time durations are
# u for microseconds,
# s for seconds,
# m for minutes,
# h for hours,
# d for days and
# w for weeks.
# If no suffix is given the value is interpreted as microseconds
#
def PeriodToSecs(period):
period = period.encode("ascii","ignore")
if "u" in period:
return float(period.strip("u"))/1000000.0
elif "s" in period:
return float(period.strip("s"))
elif "m" in period:
return float(period.strip("m"))/1000.0
elif "h" in period:
return float(period.strip("h"))*3600.0
elif "d" in period:
return float(period.strip("d"))*3600.0*24
elif "w" in period:
return float(period.strip("w"))*3600.0*24*7
else:
return float(period)/1000000.0
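# Illustrative conversions performed by PeriodToSecs, with "m" read as minutes
# per the comment above:
#   PeriodToSecs("30s") -> 30.0
#   PeriodToSecs("10m") -> 600.0
#   PeriodToSecs("1h")  -> 3600.0
#   PeriodToSecs("500") -> 0.0005   (bare values are taken as microseconds)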
def getSolarObjects(keys,Index,DB,Name,subset=["_meta","_production"]):
#Map the keys in the request path to the following properties.
index = ["Country","County","Municipality","Administrative_area","Citypart"]
#Produce the query for elastic search.
query = []
#If now keys are supplied request all entries.
l = len(keys)
if l == 0:
totalquery = {"size":1000,"query": {"match_all": {} }}
#Match each key
else:
for f in range(0,l):
query.append({"match" : {index[f]:keys[f]} })
totalquery = {"size":1000,"query": {"bool": {"must":query} }}
#Request data.
res = es.search(index=Index,doc_type="meta-data", body=totalquery)
print("Got %d Hits:" % res['hits']['total'])
#Get parameters in the request.
tail = request.args.get("tail",1000,type=int)
since = request.args.get("since","now()-7d")
until = request.args.get("until","now()")
lowercase = request.args.get("lowercase","False",type=str)
lowercase = lowercase.lower()
period = request.args.get("period","0",type=str)
    # bool() of any non-empty string is True, so interpret the text explicitly.
    fixdata = request.args.get("fixdata", "True", type=str).lower() != "false"
if lowercase == "true":
lowercase = True
else:
lowercase = False
print "___"*10
print tail, since, until, lowercase
#Avoid doing to large requests
if tail > 10000:
abort(411)
#Process the hits and requests additional data from influxDB
replys = []
for hit in res['hits']['hits']:
siteUUID = hit["_id"]
#Add ID.
reply = {}
reply["_UUID"] = siteUUID
#Meta data.
if "_meta" in subset:
reply["_meta"] = hit["_source"]
reply["_meta"]["UUID"] = siteUUID
if lowercase:
reply["_meta"] = MakeDictLowerCase(reply["_meta"])
#Production.
if "_production" in subset:
if period == "0":
q = ("select * from \"%s\" where time < %s and time > %s limit %i" % (siteUUID,until,since,tail))
print q
try:
data = DB.query(q,'m')
except Exception, err:
if err.message.find("400: Couldn't find series:") != -1:
data = []
else:
raise err
if len(data) > 0:
reply["_production"] = data[0]
reply["_production"].pop("name")
if lowercase:
reply["_production"]["columns"] = MakeListLowerCase(reply["_production"]["columns"])
else:
reply["_production"] = {}
elif period == "daily":
pass
elif period == "montly":
pass
elif period == "yearly":
pass
else:
q= ("select Min(Energy) as Energy from \"%s\" group by time(%s) where time < %s and time > %s limit %i" % (siteUUID,period,until,since,tail))
print q
print "__"*10
df = DB.QueryDf(q,'m')
shorten = 0
if type(df) == type(None):
reply["_production"] = {}
else:
#Add a last value
try:
#print "*"*20
lasttimestamp = df.index.max()
#print lasttimestamp
q2= ("select Max(Energy) as Energy from \"%s\" group by time(%s) where time > %s limit 1 order asc" % (siteUUID,period,lasttimestamp*1000000))
#print q2
df2 = DB.QueryDf(q2,'m')
#print df2
df = pd.concat([df,df2])
if df2.shape[0] > 0:
shorten = 1
except:
print "Error: calculating trailing power value"
#pass
df["Energy_period"] = df["Energy"].diff().shift(-1)
df["Power"] = df["Energy_period"] / (PeriodToSecs(period)/3600.0)
df["Energy_period"].fillna("NULL",inplace = True)
df["Power"].fillna("NULL",inplace = True)
df["Timestamp"] = df.index.to_series()
#Add fixes for delayed data that is accumulated to a single reading.
if fixdata:
#Establish max power
maxpower = float(hit["_source"]["Pmax"])*1000 * 2
#Set everything above maxpower to NaN
df["Power"].loc[df["Power"]>maxpower]=float("NaN")
unpack = df.to_dict("list")
t = unpack["Timestamp"]
e = unpack["Energy"]
e_p = unpack["Energy_period"]
p = unpack["Power"]
points = []
for i in range(0,len(t)-shorten):
points.append([e[i],e_p[i],p[i],t[i]])
reply["_production"] = {"points":points}
reply["_production"]["columns"] = list(df.columns)
if lowercase:
reply["_production"]["columns"] = MakeListLowerCase(reply["_production"]["columns"])
reply["_production"]["UUID"] = siteUUID
#else:
#select max(Energy) as Energy from 46d55815-f927-459f-a8e2-8bbcd88008ee group by time(1h) where time > now() - 60h limit 1000;
# print "Rescale"
# SiteFeed = IoTtoolkit.Feed()
# SiteFeed.AddStream("Power",DB,siteUUID,"Power",Single=False)
# SiteFeed.AddStream("Energy",DB,siteUUID,"Energy",Compressed=True)
#Resample = SiteFeed.GetResampleBuffer(int(since),int(period),int(tail))
#Resample.AddResampleColumn("Power","Energy",Resample.InterpolatePowerFromCounter)
#Resample.AddResampleColumn("Energy","Energy",Resample.InterpolateCounter)
# df = Resample.Interpolate()
# reply["_production"] = df.to_dict()
# reply["_production"]["UUID"] = siteUUID
# print time.time()
#Geography
if "_geography" in subset:
features = []
#Request data.
#query.append({"match" : {index[f]:keys[f]} })
#totalquery = {"size":1000,"query": {"bool": {"must":query} }}
#res = es.search(index=Index,doc_type="geography-data", body=totalquery)
reply["_geography"] = {}
reply["_geography"]["crs"] = {"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::3011"}}
reply["_geography"]["type"] = "FeatureCollection"
reply["_geography"]["features"] = features
        #Skip one level if it's just one data source (not counting id).
if len(reply) == 2:
replys.append(reply[reply.keys()[-1]])
else:
replys.append(reply)
return {Name:replys, "_total_hits":res['hits']['total']}
def getSolarObject(uid,Index,DB,Name,subset=["_meta","_production"]):
try:
hit = es.get(index=Index, doc_type="meta-data", id=uid)
except NotFoundError:
abort(404)
#Get parameters in the request.
tail = request.args.get("tail",1000,type=int)
since = request.args.get("since","now()-7d")
until = request.args.get("until","now()")
lowercase = request.args.get("lowercase","False",type=str)
lowercase = lowercase.lower()
if lowercase == "true":
lowercase = True
else:
lowercase = False
print "___"*10
print tail, since, until, lowercase
print type(lowercase)
#Avoid doing to large requests
if tail > 10000:
abort(411)
#Process the hits and requests additional data from influxDB
siteUUID = hit["_id"]
#Add ID.
reply = {}
reply["_UUID"] = siteUUID
#Meta data.
if "_meta" in subset:
reply["_meta"] = hit["_source"]
reply["_meta"]["UUID"] = siteUUID
if lowercase:
reply["_meta"] = MakeDictLowerCase(reply["_meta"])
#Production.
if "_production" in subset:
q = ("select * from \"%s\" where time < %s and time > %s limit %i" % (siteUUID,until,since,tail))
print q
try:
data = DB.query(q,'m')
except Exception, err:
if err.message.find("400: Couldn't find series:") != -1:
data = []
else:
raise err
if len(data) > 0:
reply["_production"] = data[0]
reply["_production"].pop("name")
if lowercase:
reply["_production"]["columns"] = MakeListLowerCase(reply["_production"]["columns"])
else:
reply["_production"] = {}
reply["_production"]["UUID"] = siteUUID
#Geography
if "_geography" in subset:
features = []
#Request data.
#query.append({"match" : {index[f]:keys[f]} })
#totalquery = {"size":1000,"query": {"bool": {"must":query} }}
#res = es.search(index=Index,doc_type="geography-data", body=totalquery)
reply["_geography"] = {}
reply["_geography"]["crs"] = {"type":"name","properties":{"name":"urn:ogc:def:crs:EPSG::3011"}}
reply["_geography"]["type"] = "FeatureCollection"
reply["_geography"]["features"] = features
    #Skip one level if it's just one data source (not counting id).
    if len(reply) == 2:
        reply = reply[reply.keys()[-1]]
    return {Name:reply}
def MakeDictLowerCase(dictionary):
new_dict = {}
for key in dictionary:
value = dictionary[key]
tkey = type(key)
tvalue = type(value)
#Convert key
if tkey == str or tkey == unicode:
new_key = key.lower()
else:
new_key = key
#Convert value
if tvalue == str or tvalue == unicode:
new_value = value.lower()
else:
new_value = value
new_dict[new_key] = new_value
return new_dict
def MakeListLowerCase(l):
NewList = []
for value in l:
tvalue = type(value)
if tvalue == str or tvalue == unicode:
new_value = value.lower()
else:
new_value = value
NewList.append(new_value)
return NewList
@app.route('/solardata', methods = ['GET'])
def get_index():
    # Top-level index listing the endpoint families implemented below.
    return jsonify({'endpoints': ['/solardata/sites/<path>',
                                  '/solardata/areas/<path>',
                                  '/solardata/site-by-id/<uuid>']})
####################
# Get site data #
####################
@app.route('/solardata/sites/<path:path_url>', methods = ['GET'])
def get_site_data(path_url):
parts = get_parts(path_url)
print parts
if parts[-1] == "_meta":
return Response(json.dumps(getSolarObjects(parts[:-1],"solar-sites-index",ProductionDB,"sites",[parts[-1]])), mimetype='application/json') #Respons(json.dumps(getMetadataSites(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_production":
return Response(json.dumps(getSolarObjects(parts[:-1],"solar-sites-index",ProductionDB,"sites",[parts[-1]])), mimetype='application/json') #Response(json.dumps(getProductionDataSites(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_geography":
return "Not implemented"
return Response(json.dumps(getSolarObjects(parts,"solar-sites-index",ProductionDB,"sites")), mimetype='application/json')
####################
# Get area data #
####################
@app.route('/solardata/areas/<path:path_url>', methods = ['GET'])
def get_area_data(path_url):
parts = get_parts(path_url)
print parts
if parts[-1] == "_meta":
return Response(json.dumps(getSolarObjects(parts[:-1],"solar-area-index",AreaDB,"areas",[parts[-1]])), mimetype='application/json')#Response(json.dumps(getMetadataAreas(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_geography":
return Response(json.dumps(getGeographyData(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_production":
return Response(json.dumps(getSolarObjects(parts[:-1],"solar-area-index",AreaDB,"areas",[parts[-1]])), mimetype='application/json')
return Response(json.dumps(getSolarObjects(parts,"solar-area-index",AreaDB,"areas")), mimetype='application/json')
####################
# Get site by id #
####################
@app.route('/solardata/site-by-id/<path:path_url>', methods = ['GET'])
def get_site_by_id_data(path_url):
parts = get_parts(path_url)
print parts
if len(parts) > 2:
abort(404)
if parts[-1] == "_meta":
        return Response(json.dumps(getSolarObject(parts[0],"solar-sites-index",ProductionDB,"sites",[parts[-1]])), mimetype='application/json') #Respons(json.dumps(getMetadataSites(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_production":
        return Response(json.dumps(getSolarObject(parts[0],"solar-sites-index",ProductionDB,"sites",[parts[-1]])), mimetype='application/json') #Response(json.dumps(getProductionDataSites(parts[:-1])), mimetype='application/json')
elif parts[-1] == "_geography":
return "Not implemented"
    return Response(json.dumps(getSolarObject(parts[0],"solar-sites-index",ProductionDB,"sites")), mimetype='application/json')
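# Illustrative requests for the route above (the UUID is a placeholder):
#   GET /solardata/site-by-id/46d55815-f927-459f-a8e2-8bbcd88008ee
#   GET /solardata/site-by-id/46d55815-f927-459f-a8e2-8bbcd88008ee/_production?tail=500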
if __name__ == '__main__':
#import InfluxDBInterface
#datalink = InfluxDBInterface.InfluxDBInterface("influxInterfaceCredentials.json")
#InfluxDB interface.
DataLink = InfluxDBInterface.InfluxDBInterface("influxInterfaceCredentials2.json")
LogDB = DataLink.databases[u'SolarLogdata']
ProductionDB = DataLink.databases[u'SolarProductionSites']
AreaDB = DataLink.databases[u'SolarProductionAreas']
#topics = DataLink.listdataseries()
#es = Elasticsearch()
es = ESinterface()
if "debug" in sys.argv:
print "Running in debug mode!"
app.run(debug = True)
else:
app.run(host = "0.0.0.0",port = 8080)
| mit |
francis-liberty/kaggle | Titanic/Viz/single_class.py | 1 | 2557 | # survival rate concerning class, sex and marriage status.
from pandas import Series, DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import pylab
df = pd.read_csv('../Data/train.csv')
# slice
idx_male = df.Sex[df.Sex == 'male'].index
idx_female = df.index.diff(idx_male)
idx_single = df.SibSp[df.SibSp == 0].index
idx_not_single = df.index.diff(idx_single)
idx_survived = df.Survived[df.Survived == 1].index
idx_died = df.index.diff(idx_survived)
idx_class1 = df.Pclass[df.Pclass == 1].index
idx_class2 = df.Pclass[df.Pclass == 2].index
idx_class3 = df.Pclass[df.Pclass == 3].index
idx_f1 = idx_female.intersection(idx_class1) # class-1 female
idx_f2 = idx_female.intersection(idx_class2) # class-2 female
idx_f3 = idx_female.intersection(idx_class3) # class-3 female
idx_sf1s = idx_f1.intersection(idx_single).intersection(idx_survived)
idx_nsf1s = idx_f1.intersection(idx_not_single).intersection(idx_survived)
idx_sf2s = idx_f2.intersection(idx_single).intersection(idx_survived)
idx_nsf2s = idx_f2.intersection(idx_not_single).intersection(idx_survived)
idx_sf3s = idx_f3.intersection(idx_single).intersection(idx_survived)
idx_nsf3s = idx_f3.intersection(idx_not_single).intersection(idx_survived)
xlabels = ['Single', 'Company']
ax = plt.subplot(1, 3, 1)
single_female_class1 = [
float(len(idx_sf1s))/len(idx_f1.intersection(idx_survived)),
float(len(idx_nsf1s))/len(idx_f1.intersection(idx_survived))]
ax.bar(range(2), single_female_class1, align='center', alpha=0.4)
ax.set_xlabel("high class")
pylab.xticks(range(2), xlabels)
ax = plt.subplot(1, 3, 2)
single_female_class2 = [
float(len(idx_sf2s))/len(idx_f2.intersection(idx_survived)),
float(len(idx_nsf2s))/len(idx_f2.intersection(idx_survived))]
ax.bar(range(2), single_female_class2, align='center', alpha=0.4)
ax.set_xlabel("middle class")
pylab.xticks(range(2), xlabels)
ax = plt.subplot(1, 3, 3)
single_female_class3 = [
float(len(idx_sf3s))/len(idx_f3.intersection(idx_survived)),
float(len(idx_nsf3s))/len(idx_f3.intersection(idx_survived))]
ax.bar(range(2), single_female_class3, align='center', alpha=0.4)
ax.set_xlabel("low class")
pylab.xticks(range(2), xlabels)
plt.suptitle("Survival rate for female of different social classes.")
plt.show()
# idx_sm = idx_male.intersection(idx_single) # single male
# idx_sf = idx_female.intersection(idx_single) # single female
# idx_nsm = idx_male.intersection(idx_not_single)
# idx_nsf = idx_female.intersection(idx_not_single)
# def main():
# if __name__ == '__main__':
# main()
| gpl-2.0 |
clemkoa/scikit-learn | doc/conf.py | 8 | 9924 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'backreferences_dir': os.path.join('modules', 'generated'),
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.8.1',
'scipy': 'http://docs.scipy.org/doc/scipy-0.13.3/reference'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: max width (in pixels) used when resizing the carousel thumbnail
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
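# The generated links have the following form (revision, path and line number
# are illustrative):
#   https://github.com/scikit-learn/scikit-learn/blob/<revision>/sklearn/<path>#L<lineno>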
| bsd-3-clause |
dsullivan7/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
zaxtax/scikit-learn | sklearn/neighbors/approximate.py | 40 | 22369 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Joel Nothman <joel.nothman@gmail.com>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
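# Worked example (illustrative, using 8-bit values for brevity; the module
# itself works on 32-bit hashes): with tree = [0b00010110, 0b01001010,
# 0b01100001, 0b10110100], bin_X = 0b01011111 and masks keeping the top 2 bits
# (left_mask = 0b11000000, right_mask = 0b00111111), the two searchsorted calls
# return the slice (1, 3), i.e. the entries whose top 2 bits match the query's 01.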
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
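# Illustrative sketch of the hashing step above (8 components shown for
# brevity; the estimators below use MAX_HASH_SIZE == 32 components):
#   projected = [[0.3, -1.2, 0.8, 0.1, -0.5, 2.0, -0.1, 0.4]]
#   signs -> 1 0 1 1 0 1 0 1 -> np.packbits -> 0b10110101 == 181
# With 32 components, each group of four packed bytes is viewed as one
# big-endian uint32, giving a single 32-bit hash per sample and per tree.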
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components=hash size and n_features=n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
jorik041/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 24 | 10036 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
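# For example (illustrative call, mirroring how RunTests is used further down),
#   RunTests('nacl-newlib', scons_cmd, nacl_dir, env)
# first builds the test targets with 'do_not_run_tests=1 -j8' appended and then
# re-runs the same scons command to execute them.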
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
# By default, use the version of Python is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, so look
    # for a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
    raise Exception('Cannot find a chrome binary - specify one with '
                    '--browser_path?')
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
# check that the HOST (not target) is 64bit
# this is emulating what msvs_env.bat is doing
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| apache-2.0 |
Windy-Ground/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also indicates the cluster size. The
silhouette plot for cluster 0 when ``n_clusters`` is equal to 2 is bigger in
size owing to the grouping of the 3 sub-clusters into one big cluster.
However, when ``n_clusters`` is equal to 4, all the plots are more or less of
similar thickness and hence of similar sizes, as can also be verified from
the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
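# Illustrative sketch (an addition for clarity, not part of the original
# example): on a tiny, well-separated toy set the mean silhouette coefficient
# is close to +1, matching the interpretation given in the docstring above.
# The toy array below is an assumption made purely for this illustration.
_toy = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
_toy_labels = KMeans(n_clusters=2, random_state=0).fit_predict(_toy)
print("Toy mean silhouette coefficient (expected close to +1):",
      silhouette_score(_toy, _toy_labels))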
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1 to 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom, is derived
for large samples (asymptotic results) and assumes the model is
correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. By contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
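# Illustrative sketch (an addition, not part of the original example): a
# classical Gaussian-likelihood form of the two criteria is, up to additive
# constants,
#     AIC = n * log(RSS / n) + 2 * df
#     BIC = n * log(RSS / n) + log(n) * df
# (LassoLarsIC's exact constants may differ), so BIC penalises extra degrees
# of freedom more strongly once log(n) > 2, i.e. for n larger than about 7.
# The toy numbers below are made up purely to show the trade-off.
def _toy_information_criteria(n=100, rss=250.0, df=5):
    aic = n * np.log(rss / n) + 2 * df
    bic = n * np.log(rss / n) + np.log(n) * df
    return aic, bic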
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
PawarPawan/h2o-v3 | py2/h2o_cmd.py | 20 | 16497 |
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
    # either return the first col, or the col identified by label. the column identified could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
                # due to floor and ceil effects we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
        # only look at [0] for now...big e308 numbers if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
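# Illustrative sketch (an addition, not part of the original helpers): the
# expected worst-case quantile error used above is roughly one histogram bin,
# since h2o summarizes the column with MAX_QBINS bins and floor/ceil effects
# can cost up to two of them.  The numbers are made up for illustration only.
def exampleExpectedMaxQuantileError(expectedMin=0.0, expectedMax=1000.0, maxQbins=1000):
    expectedRange = expectedMax - expectedMin
    # worst case: lose 2 bins to floor/ceil, so one bin is range/(maxQbins-2)
    return expectedRange / (maxQbins - 2)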
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
            assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
    # Note: plain instance method (not a classmethod) so the attributes set in __init__ are visible.
def check(self,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, **kwargs):
        if expectedLabel is not None:
            assert self.label == expectedLabel
        if expectedType is not None:
            assert self.type == expectedType
        if expectedMissing is not None:
            assert self.missing == expectedMissing
        if expectedDomain is not None:
            assert self.domain == expectedDomain
        if expectedBinsSum is not None:
            assert self.binsSum == expectedBinsSum
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
        # we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
| apache-2.0 |
arabenjamin/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method in exact nearest neighbors is measured for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
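# Illustrative sketch (an addition, not part of the original benchmark): the
# accuracy reported above is precision@k, i.e. the fraction of the k
# approximate neighbors that also appear in the exact k-neighbor set,
# averaged over all queries.
def _precision_at_k_example():
    approx_row = np.array([3, 7, 9])
    exact_row = np.array([3, 9, 12])
    # 2 of the 3 approximate neighbors are exact neighbors -> 2/3
    return np.in1d(approx_row, exact_row).mean()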
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
tosolveit/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'minkowski': dict(p=3),
'chebyshev': {},
'seuclidean': dict(V=np.random.random(DIMENSION)),
'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
'mahalanobis': dict(V=V)}
DISCRETE_METRICS = ['hamming',
'canberra',
'braycurtis']
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
'rogerstanimoto', 'russellrao', 'sokalmichener',
'sokalsneath']
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
def test_ball_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
np.random.seed(0)
X = np.random.random((40, 10)).round(0)
Y = np.random.random((10, 10)).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in BOOLEAN_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
np.random.seed(0)
X = (4 * np.random.random((40, 10))).round(0)
Y = (4 * np.random.random((10, 10))).round(0)
k = 5
def check_neighbors(metric):
bt = BallTree(X, leaf_size=1, metric=metric)
dist1, ind1 = bt.query(Y, k)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
assert_array_almost_equal(dist1, dist2)
for metric in DISCRETE_METRICS:
yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = bt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
bt = BallTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_ball_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
bt = BallTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true,
atol=atol, rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
bt = BallTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old version of scipy, doesn't accept "
"explicit bandwidth.")
dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
bt = BallTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_ball_tree_pickle():
np.random.seed(0)
X = np.random.random((10, 3))
bt1 = BallTree(X, leaf_size=1)
# Test if BallTree with callable metric is picklable
bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
ind1, dist1 = bt1.query(X)
ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(bt1, protocol=protocol)
bt2 = pickle.loads(s)
s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
bt2_pyfunc = pickle.loads(s_pyfunc)
ind2, dist2 = bt2.query(X)
ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
def test_query_haversine():
np.random.seed(0)
X = 2 * np.pi * np.random.random((40, 2))
bt = BallTree(X, leaf_size=1, metric='haversine')
dist1, ind1 = bt.query(X, k=5)
dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
assert_array_almost_equal(dist1, dist2)
assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
anomam/pvlib-python | pvlib/tests/iotools/test_midc.py | 1 | 2919 | import pandas as pd
from pandas.util.testing import network
import pytest
import pytz
from pvlib.iotools import midc
from conftest import DATA_DIR, RERUNS, RERUNS_DELAY
@pytest.fixture
def test_mapping():
return {
'Direct Normal [W/m^2]': 'dni',
'Global PSP [W/m^2]': 'ghi',
'Rel Humidity [%]': 'relative_humidity',
'Temperature @ 2m [deg C]': 'temp_air',
'Non Existant': 'variable',
}
MIDC_TESTFILE = DATA_DIR / 'midc_20181014.txt'
MIDC_RAW_TESTFILE = DATA_DIR / 'midc_raw_20181018.txt'
MIDC_RAW_SHORT_HEADER_TESTFILE = (
DATA_DIR / 'midc_raw_short_header_20191115.txt')
# TODO: not used, remove?
# midc_network_testfile = ('https://midcdmz.nrel.gov/apps/data_api.pl'
# '?site=UAT&begin=20181018&end=20181019')
def test_midc_format_index():
data = pd.read_csv(MIDC_TESTFILE)
data = midc.format_index(data)
start = pd.Timestamp("20181014 00:00")
start = start.tz_localize("MST")
end = pd.Timestamp("20181014 23:59")
end = end.tz_localize("MST")
assert type(data.index) == pd.DatetimeIndex
assert data.index[0] == start
assert data.index[-1] == end
def test_midc_format_index_tz_conversion():
data = pd.read_csv(MIDC_TESTFILE)
data = data.rename(columns={'MST': 'PST'})
data = midc.format_index(data)
assert data.index[0].tz == pytz.timezone('Etc/GMT+8')
def test_midc_format_index_raw():
data = pd.read_csv(MIDC_RAW_TESTFILE)
data = midc.format_index_raw(data)
start = pd.Timestamp('20181018 00:00')
start = start.tz_localize('MST')
end = pd.Timestamp('20181018 23:59')
end = end.tz_localize('MST')
assert data.index[0] == start
assert data.index[-1] == end
def test_read_midc_var_mapping_as_arg(test_mapping):
data = midc.read_midc(MIDC_TESTFILE, variable_map=test_mapping)
assert 'ghi' in data.columns
assert 'temp_air' in data.columns
@network
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_read_midc_raw_data_from_nrel():
start_ts = pd.Timestamp('20181018')
end_ts = pd.Timestamp('20181019')
var_map = midc.MIDC_VARIABLE_MAP['UAT']
data = midc.read_midc_raw_data_from_nrel('UAT', start_ts, end_ts, var_map)
for k, v in var_map.items():
assert v in data.columns
assert data.index.size == 2880
def test_read_midc_header_length_mismatch(mocker):
mock_data = mocker.MagicMock()
with MIDC_RAW_SHORT_HEADER_TESTFILE.open() as f:
mock_data.text = f.read()
mocker.patch('pvlib.iotools.midc.requests.get',
return_value=mock_data)
start = pd.Timestamp('2019-11-15T00:00:00-06:00')
end = pd.Timestamp('2019-11-15T23:59:00-06:00')
data = midc.read_midc_raw_data_from_nrel('', start, end)
assert isinstance(data.index, pd.DatetimeIndex)
assert data.index[0] == start
assert data.index[-1] == end
| bsd-3-clause |
anorfleet/kaggle-titanic | KaggleAux/predict.py | 6 | 3114 | import numpy as np
from pandas import DataFrame
from patsy import dmatrices
def get_dataframe_intersection(df, comparator1, comparator2):
"""
Return a dataframe with only the columns found in a comparative dataframe.
Parameters
----------
comparator1: DataFrame
        DataFrame to perform the comparison on.
comparator2: DataFrame
DataFrame to compare with.
Returns
-------
DataFrame:
Data frame with columns not found in comparator dropped.
"""
to_drop = list((c for c in comparator1 if c not in comparator2))
return df.drop(to_drop, axis=1)
def get_dataframes_intersections(df1, comparator1, df2, comparator2):
"""
Return DataFrames with the intersection of their column values.
Parameters
----------
comparator1: DataFrame
DataFrame to preform comparison on.
comparator2: DataFrame
DataFrame to compare with.
Returns
-------
Tuple:
        The resulting DataFrames with columns not found in the other dropped.
"""
comparator1 = get_dataframe_intersection(df1, comparator1, comparator2)
comparator2 = get_dataframe_intersection(df2, comparator2, comparator1)
return comparator1, comparator2
def predict(test_data, results, model_name):
"""
    Return predictions based on model results.
Parameters
----------
test_data: DataFrame
should be test data you are trying to predict
results: dict
should be dict of your models results wrapper and the formula used
to produce it.
ie.
results['Model_Name'] = {
[<statsmodels.regression.linear_model.RegressionResultsWrapper> ,
"Price ~ I(Supply, Demand)]
}
model_name: str
should be the name of your model. You can iterate through the results dict.
Returns
-------
NumPy array
Predictions in a flat NumPy array.
Example
-------
results = {'Logit': [<statsmodels.discrete.discrete_model.BinaryResultsWrapper at 0x117896650>,
'survived ~ C(pclass) + C(sex) + age + sibsp + C(embarked)']}
    compared_results = predict(test_data, results, 'Logit')
"""
model_params = DataFrame(results[model_name][0].params)
formula = results[model_name][1]
# Create regression friendly test DataFrame
yt, xt = dmatrices(formula, data=test_data, return_type='dataframe')
xt, model_params = get_dataframes_intersections(xt, xt.columns,
model_params, model_params.index)
# Convert to NumPy arrays for performance
model_params = np.asarray(model_params)
yt = np.asarray(yt)
yt = yt.ravel()
xt = np.asarray(xt)
# Use our models to create predictions
row, col = xt.shape
model_parameters = model_params.ravel()
model_array = list((model_parameters for parameter in xrange(row)))
model_array = np.asarray(model_array)
# Multiply matrix together
predictions = np.multiply(xt, model_array)
predictions = np.sum(predictions, axis=1)
return predictions
| apache-2.0 |
gerritholl/pyatmlab | pyatmlab/graphics.py | 1 | 9084 | #!/usr/bin/env python
# coding: utf-8
"""Interact with matplotlib and other plotters
"""
import os.path
import datetime
now = datetime.datetime.now
import logging
import subprocess
import sys
import pickle
import lzma
import pathlib
import numpy
import matplotlib
import matplotlib.cbook
import matplotlib.pyplot
import mpl_toolkits.basemap
from typhon import config
from . import io
from . import meta
from . import tools
def pcolor_on_map(m, lon, lat, C, **kwargs):
"""Wrapper around pcolor on a map, in case we cross the IDL
preventing spurious lines
"""
# Need to investigate why and how to solve:
# -175 - minor polar problems (5° missing)
# -178 - polar problems (2° missing)
# -179 - some problems (1° missing)
# -179.9 - many problems
# perhaps I need to mask neighbours or so?
C1 = numpy.ma.masked_where((lon<-175)|(lon>175), C, copy=True)
p1 = m.pcolor(lon, lat, C1, latlon=True, **kwargs)
# C2 = numpy.ma.masked_where(lon<0, C.data)
# p2 = m.pcolor(lon, lat, C2, latlon=True, **kwargs)
# mixed = lon.ptp(1)>300
# homog = ~mixed
# homognum = homog.nonzero()[0]
#
# breaks = numpy.diff(homognum) > 5
# breakedge = numpy.r_[-1, breaks.nonzero()[0], homog.sum()-1]
#
# for (l, e) in zip(breakedge, breakedge[1:]):
# p1 = m.pcolor(lon[homognum[(l+1):(e+1)], :],
# lat[homognum[(l+1):(e+1)], :],
# C[homognum[(l+1):(e+1)], :],
# latlon=True, **kwargs)
# lon.mask = lat.mask = C.mask = west
# p2 = m.pcolor(lon, lat, C, latlon=True, **kwargs)
# remaining lines manually
# for mix in mixed.nonzero()[0]:
# west = lon[mix, :] <= 0
# east = lon[mix, :] > 0
# for h in (west, east):
# m.pcolor(lon[mix, h],
# lat[mix, h],
# C[mix, h],
# latlon=True, **kwargs)
# For some reason, if I don't include ascontiguousarray here,
# I run into a SystemError in proj4. I haven't been able to
# find a minimum example that reproduces the bug :(
#
# And another bug: I can't pcolor a single line when using
# latlon=True, as shiftdata will fail...
#
# But even when I can, it still goes wrong because pcolor
# doesn't show the single line... :( why is masking not
# working?
# (x, y) = m(numpy.ascontiguousarray(lon[mix:(mix+1), h]),
# numpy.ascontiguousarray(lat[mix:(mix+1), h]))
# m.pcolor(x, y, C[mix:(mix+1), h], latlon=False, **kwargs)
return p1
def map_orbit_double_with_stats(lon, lat, C, U, lab1, lab2, title, filename):
"""Map orbit with uncertainty and histograms
"""
(f, a_all) = matplotlib.pyplot.subplots(2, 4,
gridspec_kw = {'width_ratios':[12, 1, 3, 8],
"hspace": 0.3},
figsize=(15, 8))
# workaround for easy way of creating extra space...
for a in a_all[:, 2]:
a.set_visible(False)
m_all = []
for a in a_all[:, 0]:
m = mpl_toolkits.basemap.Basemap(projection="moll",
resolution="c", ax=a, lon_0=0)
m.drawcoastlines()
m.drawmeridians(numpy.arange(-180, 180, 30))
m.drawparallels(numpy.arange(-90, 90, 30))
m_all.append(m)
pcr = pcolor_on_map(
m_all[0], lon, lat,
C, cmap="viridis")
pcu = pcolor_on_map(
m_all[1], lon, lat,
U, cmap="inferno_r")
cb1 = f.colorbar(pcr, cax=a_all[0, 1])
cb1.set_label(lab1)#"Counts")
cb2 = f.colorbar(pcu, cax=a_all[1, 1])
cb2.set_label(lab2)#"Random uncertainty [counts]")
a_all[0, 3].hist(C.ravel(), 50)
a_all[0, 3].set_xlabel(lab1)#"Counts")
a_all[1, 3].hist(U.ravel(), 50)
a_all[1, 3].set_xlabel(lab2)#r"$\Delta$ Counts")
for a in a_all[:, 3]:
a.grid("on")
a.set_ylabel("Number")
f.suptitle(title)
#f.subplots_adjust(wspace=0.2, hspace=0.2)
print_or_show(f, False, filename)
def plotdir():
"""Returns todays plotdir.
Configuration 'plotdir' must be set. Value is expanded with strftime.
"""
return datetime.date.today().strftime(config.conf["main"]['plotdir'])
def print_or_show(fig, show, outfile, in_plotdir=True, tikz=None,
data=None, store_meta="", close=True,
dump_pickle=True):
"""Either print or save figure, or both, depending on arguments.
Taking a figure, show and/or save figure in the default directory,
obtained with :func:plotdir. Creates plot directory if needed.
:param fig: Figure to store.
:type fig: matplotlib.Figure object
:param show: Show figure or not
:type show: boolean
:param outfile: File to write figure to, or list of files. If the
string ends in a '.', write to x.png and x.pdf.
:type outfile: string or list of strings
:param in_plotdir: If true, write to default plot directory. If
false, write to currect directory or use absolute path.
:type in_plotdir: boolean
:param tikz: Try to write tikz code with matplotlib2tikz. Requires
that the latter is installed.
:type tikz: boolean
:param data: Store associated data in .dat file (useful for pgfplots).
May be a list of ndarrays, which results in multiple numbered datafiles.
:type data: ndarray or list thereof
:param store_meta: Also store other info. This is a string that will
be written to a file. If not set or set to None, it will just
write the pyatmlab version. The file will use the same basename
        as the outfile, but replacing the extension by "info". However,
this only works if outfile is a string and not a list thereof.
To write nothing, pass an empty string.
:type store_meta: str.
:param close: If true, close figure. Defaults to true.
:type close: bool.
"""
if outfile is not None:
outfiles = [outfile] if isinstance(outfile, str) else outfile
bs = pathlib.Path(plotdir())
if isinstance(outfile, str):
if outfile.endswith("."):
outfiles = [bs / pathlib.Path(outfile+ext) for ext in ("png", "pdf")]
infofile = bs / pathlib.Path(outfile + "info")
figfile = bs / pathlib.Path(outfile + "pkl.xz")
else:
outfiles = [bs / pathlib.Path(outfile)]
infofile = None
figfile = None
if infofile is not None:
infofile.parent.mkdir(parents=True, exist_ok=True)
logging.debug("Obtaining verbose stack info")
pr = subprocess.run(["pip", "freeze"], stdout=subprocess.PIPE)
info = " ".join(sys.argv) + "\n" + pr.stdout.decode("utf-8") + "\n"
info += tools.get_verbose_stack_description()
# if infofile is not None and info:
logging.info("Writing info to {!s}".format(infofile))
with infofile.open("w", encoding="utf-8") as fp:
fp.write(info)
if dump_pickle and figfile is not None:
logging.info("Writing figure object to {!s}".format(figfile))
with lzma.open(str(figfile), "wb", preset=lzma.PRESET_DEFAULT) as fp:
pickle.dump(fig, fp, protocol=4)
# interpret as sequence
for outf in outfiles:
logging.info("Writing to file: {!s}".format(outf))
outf.parent.mkdir(parents=True, exist_ok=True)
i = 0
while True:
i += 1
try:
fig.canvas.print_figure(str(outf))
except matplotlib.cbook.Locked.TimeoutError:
logging.warning("Failed attempt no. {:d}".format(i))
if i > 100:
raise
else:
break
if show:
matplotlib.pyplot.show()
if close:
matplotlib.pyplot.close(fig)
if tikz is not None:
import matplotlib2tikz
print(now(), "Writing also to:", os.path.join(plotdir(), tikz))
matplotlib2tikz.save(os.path.join(plotdir(), tikz))
if data is not None:
if not os.path.exists(io.plotdatadir()):
os.makedirs(io.plotdatadir())
if isinstance(data, numpy.ndarray):
data = (data,)
# now take it as a loop
for (i, dat) in enumerate(data):
outf = os.path.join(io.plotdatadir(),
"{:s}{:d}.dat".format(
os.path.splitext(outfiles[0])[0], i))
fmt = ("%d" if issubclass(dat.dtype.type, numpy.integer) else
'%.18e')
if len(dat.shape) < 3:
numpy.savetxt(outf, dat, fmt=fmt)
elif len(dat.shape) == 3:
io.savetxt_3d(outf, dat, fmt=fmt)
else:
raise ValueError("Cannot write {:d}-dim ndarray to textfile".format(
len(dat.shape)))
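# Minimal usage sketch (an addition, not part of the original module; it
# assumes config.conf["main"]["plotdir"] is set): save a figure as both PNG
# and PDF in today's plot directory without displaying it.
if __name__ == "__main__":
    _fig = matplotlib.pyplot.figure()
    _fig.gca().plot([0, 1], [0, 1])
    print_or_show(_fig, False, "demo_plot.", dump_pickle=False)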
| bsd-3-clause |
lancezlin/ml_template_py | lib/python2.7/site-packages/matplotlib/delaunay/interpolate.py | 8 | 7288 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid
from matplotlib._delaunay import nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
key -- (slice(ystart,ystop,ystep), slice(xtart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
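# Illustrative sketch (an addition, not part of the original module): the
# grid-spec tuple returned for a numpy.mgrid-style key.
def _slice2gridspec_example():
    # key = (y-slice, x-slice): 5 steps in y over [0, 1], 11 steps in x over [0, 2]
    # returns (x0, x1, xstep, y0, y1, ystep) == (0, 2, 11, 0, 1, 5)
    return slice2gridspec((slice(0, 1, 5j), slice(0, 2, 11j)))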
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
Given the Delauany triangulation (or indeed *any* complete triangulation)
we can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point
of the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
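# A minimal usage sketch for LinearInterpolator, assuming the Triangulation
# class exported by this package (matplotlib.delaunay.triangulate); the node
# data below are illustrative only.
def _example_linear_interpolation():
    from matplotlib.delaunay.triangulate import Triangulation
    xn = np.array([0.0, 1.0, 0.0, 1.0, 0.5])
    yn = np.array([0.0, 0.0, 1.0, 1.0, 0.5])
    zn = xn + 2.0 * yn  # function values at the nodes
    interp = LinearInterpolator(Triangulation(xn, yn), zn,
                                default_value=np.nan)
    # 11x11 regular grid over the unit square, numpy.mgrid-style slices
    return interp[0.0:1.0:11j, 0.0:1.0:11j]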
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also
construct what is called the Voronoi diagram from a Delaunay triangulation
by connecting the circumcenters of the triangles to those of their
neighbors to form a tessellation of irregular polygons covering the plane,
each containing exactly one node from the triangulation. Each point in a
node's Voronoi polygon is closer to that node than to any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors
of this point as the set of nodes participating in Delaunay triangles
whose circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
polygons. The new Voronoi diagram would have a polygon around the
inserted point. This polygon would "steal" area from the original Voronoi
polygons. For each node i in the natural neighbors set, we compute the
area stolen from its original Voronoi polygon, stolen[i]. We define the
natural neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous across the convex hull of the
input points, except at the nodes themselves. One can find the set of points
that a given node will affect by computing the union of the areas covered
by the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(
x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
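# A minimal usage sketch for NNInterpolator, again assuming the Triangulation
# class from this package; the sample data are illustrative only.
def _example_nn_interpolation():
    from matplotlib.delaunay.triangulate import Triangulation
    xn = np.random.uniform(0.0, 1.0, 50)
    yn = np.random.uniform(0.0, 1.0, 50)
    zn = np.sin(3.0 * xn) * np.cos(2.0 * yn)
    interp = NNInterpolator(Triangulation(xn, yn), zn, default_value=np.nan)
    grid = interp[0.0:1.0:25j, 0.0:1.0:25j]  # (25, 25) regular grid
    # __call__ evaluates at arbitrary (x, y) locations without a grid
    pointwise = interp(np.array([0.2, 0.8]), np.array([0.4, 0.6]))
    return grid, pointwise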
| mit |
duyhtq/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
soligschlager/topography | sandbox/macaque/clustering_embedding_macaque.py | 2 | 1476 | #!/usr/bin/python
import sys, os, h5py, scipy, numpy as np
from sklearn.utils.arpack import eigsh
from sklearn.cluster import KMeans
from scipy.io.matlab import savemat
def main(argv):
# Set defaults:
n_components_embedding = 25
comp_min = 2
comp_max = 20 + 1
varname = 'data'
filename = './test'
# Import files
f = h5py.File(('%s.mat' % filename),'r')
dataCorr = np.array(f.get('%s' % varname))
# Prep matrix
K = (dataCorr + 1) / 2.
v = np.sqrt(np.sum(K, axis=1))
A = K/(v[:, None] * v[None, :])
del K
A = np.squeeze(A * [A > 0])
# Run embedding
lambdas, vectors = eigsh(A, k=n_components_embedding)
lambdas = lambdas[::-1]
vectors = vectors[:, ::-1]
psi = vectors/vectors[:, 0][:, None]
lambdas = lambdas[1:] / (1 - lambdas[1:])
embedding = psi[:, 1:(n_components_embedding + 1)] * lambdas[:n_components_embedding][None, :]
# Run kmeans clustering
def kmeans(embedding, n_components):
est = KMeans(n_clusters=n_components, n_jobs=-1, init='k-means++', n_init=300)
est.fit_transform(embedding)
labels = est.labels_
data = labels.astype(np.float)
return data
results = list()
for n_components in xrange(comp_min,comp_max):
results.append(kmeans(embedding, n_components))
savemat(('%s_results.mat' % filename), {'results':results})
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
mkrapp/semic | optimize/plot_costs.py | 2 | 1367 | '''
plot the best PSO positions.
'''
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
from tools import get_params
def plot_costs(fnm,show=True,savefig=False):
params, par_names = get_params()
print params
data = np.loadtxt(fnm,unpack=True)
mpl.rcParams['figure.figsize'] = 15,12
mpl.rcParams['font.size'] = 10
n = len(par_names)
f, axarr = plt.subplots(n,n)
j = 0
for v in par_names:
i = 0
for p in par_names:
if (j < i):
px = data[i+2,:]
py = data[j+2,:]
axarr[i,j].plot(px,py,'ko-',alpha=0.1,mew=0)
axarr[i,j].plot(px[-1],py[-1],'rs',alpha=0.5,markersize=7,mew=0)
axarr[i,j].set_xlabel(par_names[i])
axarr[i,j].set_ylabel(par_names[j])
axarr[i,j].set_xlim(float(params[i][1]),float(params[i][2]))
axarr[i,j].set_ylim(float(params[j][1]),float(params[j][2]))
else:
f.delaxes(axarr[i,j])
plt.setp(axarr[i,j].get_xticklabels(), rotation=30)
i += 1
j += 1
f.set_tight_layout(True)
if show: plt.show()
if savefig: f.savefig('parameter_matrix.pdf', format='pdf', bbox_inches='tight', pad_inches=0.0)
if __name__ == "__main__":
plot_costs(sys.argv[1])
| mit |
levjj/rticomp | experiment-jpeg.py | 1 | 5075 | '''
File compress.py
Created on 11 Feb 2014
@author: Christopher Schuster, cschuste@ucsc.edu
'''
# General imports
from __future__ import print_function
import os,sys,subprocess,struct,io
# Possible use of matplotlib from http://matplotlib.sourceforge.net/
from pylab import *
import matplotlib.pyplot as plt
# More imports
import Image
import numpy
import ImageOps
'''
Read an image file into an array suitable for display with matplotlib
@param path: Image file path on disk
@return An (h, w, 4) array of RGBA values scaled to [0, 1]
'''
def img_matrix(path):
img=Image.open(str(path))
imgData=img.getdata()
imgTab=numpy.array(imgData)
w,h=img.size
imgMat=numpy.reshape(imgTab / 255.0,(h,w,4))
return imgMat
def render(rti, light):
script = os.path.dirname(__file__) + "/render.js"
inp = os.path.dirname(__file__) + "/../data/" + rti
outp = os.path.dirname(__file__) + "/../out/" + rti + ".png"
cmd = "node " + script + " -i " + inp + " -o " + outp + \
" --lx=" + str(light[0]) + " --ly=" + str(light[1])
# print(cmd)
subprocess.call(cmd, shell=True)
return outp
def nextline(f):
line = f.readline()
if line == "":
raise Exception("EOF")
if line[0] == "#":
return nextline(f)
return line.split(" ")
def compress(rtifile,qua):
# print ("Compressing " + rtifile)
fi = open("data/" + rtifile, 'r')
if nextline(fi) != ["3\n"]:
raise Exception("Wrong file")
w,h,ch = [int(i) for i in nextline(fi)]
c_num,c_type,c_size = [int(i) for i in nextline(fi)]
if c_type != 2:
raise Exception("Not supported")
scales = struct.unpack('f'*c_num,fi.read(4*c_num))
biases = struct.unpack('f'*c_num,fi.read(4*c_num))
c_data = numpy.zeros((c_num,h,w,ch),dtype=numpy.uint8)
for y in range(h):
for x in range(w):
for i in range(ch):
for c in range(c_num):
c_data[c,y,x,i] = struct.unpack('B',fi.read(1))[0]
fi.close()
fo = open("out/vase-comp.jrti", 'w')
for scale in scales:
fo.write(struct.pack('f', scale))
for bias in biases:
fo.write(struct.pack('f', bias))
for i in range(c_num):
img = Image.new("RGB",(w,h))
cd = [tuple(c_data[i,y,x]) for y in range(h) for x in range(w)]
img.putdata(cd)
mem = io.BytesIO()
img.save(mem, "JPEG", quality=qua)
jpegdata = mem.getvalue()
mem.close()
fo.write(struct.pack('i', len(jpegdata)))
for b in jpegdata:
fo.write(b)
fo.close()
return "vase-comp.jrti"
def decompress(crtifile):
# print ("Decompressing " + crtifile)
fi = open("out/" + crtifile, 'r')
scales = struct.unpack('f'*9,fi.read(4*9))
biases = struct.unpack('f'*9,fi.read(4*9))
c_images = []
for i in range(9):
size = struct.unpack('i', fi.read(4))[0]
mem = io.BytesIO(fi.read(size))
jpeg = Image.open(mem)
#c = numpy.reshape(numpy.array(jpeg),(470,320)).astype(numpy.uint8)
c_images.append(numpy.array(jpeg.convert('RGB')))
mem.close()
fi.close()
fo = open("data/vase-jcomp.rti", 'w')
fo.write("#HSH1.2\n")
fo.write("3\n")
fo.write("320 470 3\n")
fo.write("9 2 1\n")
for scale in scales:
fo.write(struct.pack('f', scale))
for bias in biases:
fo.write(struct.pack('f', bias))
for y in range(470):
for x in range(320):
for i in range(3):
for c in range(9):
fo.write(struct.pack('B', c_images[c][y,x,i]))
fo.close()
return "vase-jcomp.rti"
def ploti(img, qua):
imgOutMat = img_matrix(img)
plt.imshow(imgOutMat, interpolation='nearest', hold=True)
plt.xticks([])
plt.yticks([])
plt.xlim(100, 200)
plt.ylim(100, 200)
#plt.show()
fn = 'out/j_' + str(int(qua)) + '.png'
plt.savefig(fn, dpi=80,bbox_inches='tight')
def measure(ucrti, lightx, lighty, qua):
uncomp = render('vase.rti', [lightx, lighty])
comp = render(ucrti, [lightx, lighty])
ploti(comp, qua)
res = {}
cmd = "dssim/dssim " + uncomp + " " + comp
res["ssim"] = subprocess.check_output(cmd, shell=True)
cmd = "compare -metric PSNR " + uncomp + " " + comp + " /dev/null 2>&1"
res["psnr"] = subprocess.check_output(cmd, shell=True)
cmd = "compare -metric RMSE " + uncomp + " " + comp + " /dev/null 2>&1"
res["rmse"] = subprocess.check_output(cmd, shell=True)
res["osize"] = os.path.getsize("data/vase.rti")
res["csize"] = os.path.getsize("out/vase-comp.jrti")
res["comp"] = (res["osize"] + 0.0) / res["csize"]
return res
def run(qua):
crti = compress("vase.rti", qua)
ucrti = decompress(crti)
res = measure(ucrti, 50.0, 50.0, qua)
print("JPEG", end=";")
print("vase.rti", end=";")
print(res["comp"], end=";")
print(res["psnr"].strip(), end=";")
print(res["rmse"].strip(), end=";")
print(res["ssim"].strip(), end=";")
print(qua)
print("Method;File;CompRatio;PSNR;RMSE;SSIM;Qua")
for qua in range(1,5,1):
run(qua)
| mit |
ajheaps/cf-plot | cfplot/cfplot.py | 1 | 336361 | """
Climate contour/vector plots using cf-python, matplotlib and cartopy.
Andy Heaps NCAS-CMS April 2021
"""
import numpy as np
import subprocess
from scipy import interpolate
import matplotlib
from copy import deepcopy
import os
import sys
import matplotlib.pyplot as plot
from matplotlib.collections import PolyCollection
from distutils.version import StrictVersion
import cartopy.crs as ccrs
import cartopy.util as cartopy_util
import cartopy.feature as cfeature
from scipy.interpolate import griddata
import shapely.geometry as sgeom
import shapely
from matplotlib.collections import PatchCollection
# Check for the minimum cf-python version
cf_version_min = '3.0.0b2'
errstr = '\n\n cf-python > ' + cf_version_min
errstr += '\n needs to be installed to use cf-plot \n\n'
try:
import cf
if StrictVersion(cf.__version__) < StrictVersion(cf_version_min):
raise Warning(errstr)
except ImportError:
raise Warning(errstr)
# Initiate the pvars class
# This is used for storing plotting variables in cfp.plotvars
class pvars(object):
def __init__(self, **kwargs):
'''Initialize a new Pvars instance'''
for attr, value in kwargs.items():
setattr(self, attr, value)
def __str__(self):
'''x.__str__() <==> str(x)'''
out = ['%s = %s' % (a, repr(v)) for a, v in self.__dict__.items()]
return '\n'.join(out)
# Check for a display and use the Agg backing store if none is present
# This is for batch mode processing
try:
disp = os.environ["DISPLAY"]
except Exception:
matplotlib.use('Agg')
# Code to check if the ImageMagick display command is available
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
def ext_candidates(fpath):
yield fpath
for ext in os.environ.get("PATHEXT", "").split(os.pathsep):
yield fpath + ext
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
for candidate in ext_candidates(exe_file):
if is_exe(candidate):
return candidate
return None
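# For example, which('display') returns the full path to ImageMagick's
# 'display' binary if it is found on PATH (including PATHEXT-style
# extensions on Windows), and None otherwise.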
# Default colour scales
# cscale1 is a differential data scale - blue to red
cscale1 = ['#0a3278', '#0f4ba5', '#1e6ec8', '#3ca0f0', '#50b4fa', '#82d2ff',
'#a0f0ff', '#c8faff', '#e6ffff', '#fffadc', '#ffe878', '#ffc03c',
'#ffa000', '#ff6000', '#ff3200', '#e11400', '#c00000', '#a50000']
# viridis is a continuous data scale - blue, green, yellow
viridis = ['#440154', '#440255', '#440357', '#450558', '#45065a', '#45085b',
'#46095c', '#460b5e', '#460c5f', '#460e61', '#470f62', '#471163',
'#471265', '#471466', '#471567', '#471669', '#47186a', '#48196b',
'#481a6c', '#481c6e', '#481d6f', '#481e70', '#482071', '#482172',
'#482273', '#482374', '#472575', '#472676', '#472777', '#472878',
'#472a79', '#472b7a', '#472c7b', '#462d7c', '#462f7c', '#46307d',
'#46317e', '#45327f', '#45347f', '#453580', '#453681', '#443781',
'#443982', '#433a83', '#433b83', '#433c84', '#423d84', '#423e85',
'#424085', '#414186', '#414286', '#404387', '#404487', '#3f4587',
'#3f4788', '#3e4888', '#3e4989', '#3d4a89', '#3d4b89', '#3d4c89',
'#3c4d8a', '#3c4e8a', '#3b508a', '#3b518a', '#3a528b', '#3a538b',
'#39548b', '#39558b', '#38568b', '#38578c', '#37588c', '#37598c',
'#365a8c', '#365b8c', '#355c8c', '#355d8c', '#345e8d', '#345f8d',
'#33608d', '#33618d', '#32628d', '#32638d', '#31648d', '#31658d',
'#31668d', '#30678d', '#30688d', '#2f698d', '#2f6a8d', '#2e6b8e',
'#2e6c8e', '#2e6d8e', '#2d6e8e', '#2d6f8e', '#2c708e', '#2c718e',
'#2c728e', '#2b738e', '#2b748e', '#2a758e', '#2a768e', '#2a778e',
'#29788e', '#29798e', '#287a8e', '#287a8e', '#287b8e', '#277c8e',
'#277d8e', '#277e8e', '#267f8e', '#26808e', '#26818e', '#25828e',
'#25838d', '#24848d', '#24858d', '#24868d', '#23878d', '#23888d',
'#23898d', '#22898d', '#228a8d', '#228b8d', '#218c8d', '#218d8c',
'#218e8c', '#208f8c', '#20908c', '#20918c', '#1f928c', '#1f938b',
'#1f948b', '#1f958b', '#1f968b', '#1e978a', '#1e988a', '#1e998a',
'#1e998a', '#1e9a89', '#1e9b89', '#1e9c89', '#1e9d88', '#1e9e88',
'#1e9f88', '#1ea087', '#1fa187', '#1fa286', '#1fa386', '#20a485',
'#20a585', '#21a685', '#21a784', '#22a784', '#23a883', '#23a982',
'#24aa82', '#25ab81', '#26ac81', '#27ad80', '#28ae7f', '#29af7f',
'#2ab07e', '#2bb17d', '#2cb17d', '#2eb27c', '#2fb37b', '#30b47a',
'#32b57a', '#33b679', '#35b778', '#36b877', '#38b976', '#39b976',
'#3bba75', '#3dbb74', '#3ebc73', '#40bd72', '#42be71', '#44be70',
'#45bf6f', '#47c06e', '#49c16d', '#4bc26c', '#4dc26b', '#4fc369',
'#51c468', '#53c567', '#55c666', '#57c665', '#59c764', '#5bc862',
'#5ec961', '#60c960', '#62ca5f', '#64cb5d', '#67cc5c', '#69cc5b',
'#6bcd59', '#6dce58', '#70ce56', '#72cf55', '#74d054', '#77d052',
'#79d151', '#7cd24f', '#7ed24e', '#81d34c', '#83d34b', '#86d449',
'#88d547', '#8bd546', '#8dd644', '#90d643', '#92d741', '#95d73f',
'#97d83e', '#9ad83c', '#9dd93a', '#9fd938', '#a2da37', '#a5da35',
'#a7db33', '#aadb32', '#addc30', '#afdc2e', '#b2dd2c', '#b5dd2b',
'#b7dd29', '#bade27', '#bdde26', '#bfdf24', '#c2df22', '#c5df21',
'#c7e01f', '#cae01e', '#cde01d', '#cfe11c', '#d2e11b', '#d4e11a',
'#d7e219', '#dae218', '#dce218', '#dfe318', '#e1e318', '#e4e318',
'#e7e419', '#e9e419', '#ece41a', '#eee51b', '#f1e51c', '#f3e51e',
'#f6e61f', '#f8e621', '#fae622', '#fde724']
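# These lists are selected and resampled at plot time with cscale(), e.g.
# cscale('viridis', ncols=17) for a continuous field or
# cscale('scale1', below=8, above=10) for a differential one, as done in
# con() below.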
# Read in defaults if they exist and overlay
# for contour options of fill, blockfill and lines
global_fill = True
global_lines = True
global_blockfill = False
global_degsym = False
global_viewer = 'display'
defaults_file = os.path.expanduser("~") + '/.cfplot_defaults'
if os.path.exists(defaults_file):
with open(defaults_file) as file:
for line in file:
vals = line.split(' ')
com, val = vals
v = False
if val.strip() == 'True':
v = True
if com == 'blockfill':
global_blockfill = v
if com == 'lines':
global_lines = v
if com == 'fill':
global_fill = v
if com == 'degsym':
global_degsym = v
if com == 'viewer':
global_viewer = val.strip()
# plotvars - global plotting variables
plotvars = pvars(lonmin=-180, lonmax=180, latmin=-90, latmax=90, proj='cyl',
resolution='110m', plot_type=1, boundinglat=0, lon_0=0,
levels=None,
levels_min=None, levels_max=None, levels_step=None,
norm=None, levels_extend='both', xmin=None,
xmax=None, ymin=None, ymax=None, xlog=None, ylog=None,
rows=1, columns=1, file=None, orientation='landscape',
user_mapset=0, user_gset=0, cscale_flag=0, user_levs=0,
user_plot=0, master_plot=None, plot=None, cs=cscale1,
cs_user='cscale1', mymap=None, xticks=None, yticks=None,
xticklabels=None, yticklabels=None, xstep=None, ystep=None,
xlabel=None, ylabel=None, title=None, title_fontsize=15,
axis_label_fontsize=11, text_fontsize=11,
text_fontweight='normal', axis_label_fontweight='normal',
colorbar_fontsize=11, colorbar_fontweight='normal',
title_fontweight='normal', continent_thickness=None,
continent_color=None, continent_linestyle=None,
pos=1, viewer=global_viewer, global_viewer=global_viewer,
tspace_year=None, tspace_month=None, tspace_day=None,
tspace_hour=None, xtick_label_rotation=0,
xtick_label_align='center', ytick_label_rotation=0,
ytick_label_align='right', legend_text_size=11,
legend_text_weight='normal',
cs_uniform=True, master_title=None,
master_title_location=[0.5, 0.95], master_title_fontsize=30,
master_title_fontweight='normal', dpi=None,
plot_xmin=None, plot_xmax=None, plot_ymin=None,
plot_ymax=None, land_color=None, ocean_color=None,
lake_color=None, twinx=False, twiny=False,
rotated_grid_thickness=2.0, rotated_grid_spacing=10,
rotated_deg_spacing=0.75, rotated_continents=True,
rotated_grid=True, rotated_labels=True,
legend_frame=True, legend_frame_edge_color='k',
legend_frame_face_color=None, degsym=global_degsym,
axis_width=None, grid=True, grid_spacing=1,
grid_colour='k', grid_linestyle='--',
grid_thickness=1.0, aspect='equal',
graph_xmin=None, graph_xmax=None,
graph_ymin=None, graph_ymax=None,
level_spacing=None, tight=False)
# Check for iPython notebook inline
# and set the viewer to None if found
is_inline = 'inline' in matplotlib.get_backend()
if is_inline:
plotvars.viewer = None
# Check for OSX and if so use matplotlib for for the viewer
# Not all users will have ImageMagick installed / XQuartz running
# Users can still select this with cfp.setvars(viewer='display')
if sys.platform == 'darwin':
plotvars.global_viewer = 'matplotlib'
plotvars.viewer = 'matplotlib'
def con(f=None, x=None, y=None, fill=global_fill, lines=global_lines, line_labels=True,
title=None, colorbar_title=None, colorbar=True,
colorbar_label_skip=None, ptype=0, negative_linestyle='solid',
blockfill=global_blockfill, zero_thick=False, colorbar_shrink=None,
colorbar_orientation=None, colorbar_position=None, xlog=False,
ylog=False, axes=True, xaxis=True, yaxis=True, xticks=None,
xticklabels=None, yticks=None, yticklabels=None, xlabel=None,
ylabel=None, colors='k', swap_axes=False, verbose=None,
linewidths=None, alpha=1.0, colorbar_text_up_down=False,
colorbar_fontsize=None, colorbar_fontweight=None,
colorbar_text_down_up=False, colorbar_drawedges=True,
colorbar_fraction=None, colorbar_thick=None,
colorbar_anchor=None, colorbar_labels=None,
linestyles=None, zorder=1, level_spacing=None,
ugrid=False, face_lons=False, face_lats=False, face_connectivity=False,
titles=False):
"""
| con is the interface to contouring in cf-plot. The minimum use is con(f)
| where f is a 2 dimensional array. If a cf field is passed then an
| appropriate plot will be produced i.e. a longitude-latitude or
| latitude-height plot for example. If a 2d numeric array is passed then
| the optional arrays x and y can be used to describe the x and y data
| point locations.
|
| f - array to contour
| x - x locations of data in f (optional)
| y - y locations of data in f (optional)
| fill=True - colour fill contours
| lines=True - draw contour lines and labels
| line_labels=True - label contour lines
| title=title - title for the plot
| ptype=0 - plot type - not needed for cf fields.
| 0 = no specific plot type,
| 1 = longitude-latitude,
| 2 = latitude - height,
| 3 = longitude - height,
| 4 = latitude - time,
| 5 = longitude - time
| 6 = rotated pole
| negative_linestyle='solid' - set to one of 'solid', 'dashed'
| zero_thick=False - add a thick zero contour line. Set to 3 for example.
| blockfill=False - set to True for a blockfill plot
| colorbar_title=colbar_title - title for the colour bar
| colorbar=True - add a colour bar if a filled contour plot
| colorbar_label_skip=None - skip colour bar labels. Set to 2 to skip
| every other label.
| colorbar_orientation=None - options are 'horizontal' and 'vertical'
| The default for most plots is horizontal but
| for polar stereographic plots this is vertical.
| colorbar_shrink=None - value to shrink the colorbar by. If the colorbar
| exceeds the plot area then values of 1.0, 0.55
| or 0.5 may help it better fit the plot area.
| colorbar_position=None - position of colorbar
| [xmin, ymin, x_extent,y_extent] in normalised
| coordinates. Use when a common colorbar
| is required for a set of plots. A typical set
| of values would be [0.1, 0.05, 0.8, 0.02]
| colorbar_fontsize=None - text size for colorbar labels and title
| colorbar_fontweight=None - font weight for colorbar labels and title
| colorbar_text_up_down=False - if True horizontal colour bar labels alternate
| above (start) and below the colour bar
| colorbar_text_down_up=False - if True horizontal colour bar labels alternate
| below (start) and above the colour bar
| colorbar_drawedges=True - draw internal divisions in the colorbar
| colorbar_fraction=None - space for the colorbar - default = 0.21, in normalised
| coordinates
| colorbar_thick=None - thickness of the colorbar - default = 0.015, in normalised
| coordinates
| colorbar_anchor=None - default=0.5 - anchor point of colorbar within the fraction space.
| 0.0 = close to plot, 1.0 = further away
| colorbar_labels=None - labels to use for colorbar. The default is to use the contour
| levels as labels
| colors='k' - contour line colors - takes one or many values.
| xlog=False - logarithmic x axis
| ylog=False - logarithmic y axis
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
| xticks=None - xtick positions
| xticklabels=None - xtick labels
| yticks=None - y tick positions
| yticklabels=None - ytick labels
| xlabel=None - label for x axis
| ylabel=None - label for y axis
| swap_axes=False - swap plotted axes - only valid for X, Y, Z vs T plots
| verbose=None - change to 1 to get a verbose listing of what con
| is doing
| linewidths=None - contour linewidths. Either a single number for all
| lines or array of widths
| linestyles=None - takes 'solid', 'dashed', 'dashdot' or 'dotted'
| alpha=1.0 - transparency setting. The default is no transparency.
| zorder=1 - order of drawing
| level_spacing=None - Default of 'linear' level spacing. Also takes 'log', 'loglike',
| 'outlier' and 'inspect'
| ugrid=False - flag for contouring ugrid data
| face_lons=None - longitude points for face vertices
| face_lats=None - latitude points for face vertices
| face_connectivity=None - connectivity for face vertices
| titles=False - set to True to have a dimensions title
:Returns:
None
"""
# Turn off divide warning in contour routine which is a numpy issue
old_settings = np.seterr(all='ignore')
np.seterr(divide='ignore')
# Set potential user axis labels
user_xlabel = xlabel
user_ylabel = ylabel
# Extract data for faces if a UGRID blockplot
blockfill_ugrid = False
if face_lons and face_lats and face_connectivity:
blockfill_ugrid = True
fill = False
ugrid = True
if isinstance(f, cf.Field):
field = f.array
else:
field = f
field_orig = deepcopy(field)
if isinstance(face_lons, cf.Field):
face_lons_array = face_lons.array
else:
face_lons_array = face_lons
if isinstance(face_lats, cf.Field):
face_lats_array = face_lats.array
else:
face_lats_array = face_lats
if isinstance(face_connectivity, cf.Field):
face_connectivity_array = face_connectivity.array
else:
face_connectivity_array = face_connectivity
# Extract required data for contouring
# If a cf-python field
if isinstance(f, cf.Field):
# Check data is 2D
ndims = np.squeeze(f.data).ndim
ugrid = False
if ndims == 1:
ugrid = True
if ndims > 2:
errstr = "\n\ncfp.con error need a 1 or 2 dimensional field to contour\n"
errstr += "received " + str(np.squeeze(f.data).ndim) + " dimensions\n\n"
raise TypeError(errstr)
# Extract data
if verbose:
print('con - calling cf_data_assign')
#if not ugrid_blockfill:
field, x, y, ptype, colorbar_title, xlabel, ylabel, xpole, ypole =\
cf_data_assign(f, colorbar_title, verbose=verbose)
if user_xlabel is not None:
xlabel = user_xlabel
if user_ylabel is not None:
ylabel = user_ylabel
elif isinstance(f, cf.FieldList):
raise TypeError("\n\ncfp.con - cannot contour a field list\n\n")
else:
if verbose:
print('con - using user assigned data')
field = f # field data passed in as f
if x is None:
x = np.arange(np.shape(field)[1])
if y is None:
y = np.arange(np.shape(field)[0])
check_data(field, x, y)
xlabel = ''
ylabel = ''
# Set contour line styles
matplotlib.rcParams['contour.negative_linestyle'] = negative_linestyle
# Set contour lines off on block plots
if blockfill:
fill = False
field_orig = deepcopy(field)
x_orig = deepcopy(x)
y_orig = deepcopy(y)
# Check number of colours and levels match if user has modified the
# number of colours
if plotvars.cscale_flag == 2:
ncols = np.size(plotvars.cs)
nintervals = np.size(plotvars.levels) - 1
if plotvars.levels_extend == 'min':
nintervals += 1
if plotvars.levels_extend == 'max':
nintervals += 1
if plotvars.levels_extend == 'both':
nintervals += 2
if ncols != nintervals:
errstr = "\n\ncfp.con - blockfill error \n"
errstr += "need to match number of colours and contour intervals\n"
errstr += "Don't forget to take account of the colorbar "
errstr += "extensions\n\n"
raise TypeError(errstr)
# Turn off colorbar if fill is turned off
if not fill and not blockfill and not blockfill_ugrid:
colorbar = False
# Revert to default colour scale if cscale_flag flag is set
if plotvars.cscale_flag == 0:
plotvars.cs = cscale1
# Set the orientation of the colorbar
if plotvars.plot_type == 1:
if plotvars.proj == 'npstere' or plotvars.proj == 'spstere':
if colorbar_orientation is None:
colorbar_orientation = 'vertical'
if colorbar_orientation is None:
colorbar_orientation = 'horizontal'
# Store original map resolution
resolution_orig = plotvars.resolution
# Set size of color bar if not specified
if colorbar_shrink is None:
colorbar_shrink = 1.0
if plotvars.proj == 'npstere' or plotvars.proj == 'spstere':
colorbar_shrink = 0.8
# Set plot type if user specified
if (ptype is not None):
plotvars.plot_type = ptype
# Get contour levels if none are defined
spacing = 'linear'
if plotvars.level_spacing is not None:
spacing = plotvars.level_spacing
if level_spacing is not None:
spacing = level_spacing
if plotvars.levels is None:
clevs, mult, fmult = calculate_levels(field=field,
level_spacing=spacing,
verbose=verbose)
else:
clevs = plotvars.levels
mult = 0
fmult = 1
# Set the colour scale if nothing is defined
includes_zero = False
if plotvars.cscale_flag == 0:
col_zero = 0
for cval in clevs:
if includes_zero is False:
col_zero = col_zero + 1
if cval == 0:
includes_zero = True
if includes_zero:
cs_below = col_zero
cs_above = np.size(clevs) - col_zero + 1
if plotvars.levels_extend == 'max' or plotvars.levels_extend == 'neither':
cs_below = cs_below - 1
if plotvars.levels_extend == 'min' or plotvars.levels_extend == 'neither':
cs_above = cs_above - 1
uniform = True
if plotvars.cs_uniform is False:
uniform = False
cscale('scale1', below=cs_below, above=cs_above, uniform=uniform)
else:
ncols = np.size(clevs)+1
if plotvars.levels_extend == 'min' or plotvars.levels_extend == 'max':
ncols = ncols-1
if plotvars.levels_extend == 'neither':
ncols = ncols-2
cscale('viridis', ncols=ncols)
plotvars.cscale_flag = 0
# User selected colour map but no mods so fit to levels
if plotvars.cscale_flag == 1:
ncols = np.size(clevs)+1
if plotvars.levels_extend == 'min' or plotvars.levels_extend == 'max':
ncols = ncols-1
if plotvars.levels_extend == 'neither':
ncols = ncols-2
cscale(plotvars.cs_user, ncols=ncols)
plotvars.cscale_flag = 1
# Set colorbar labels
# Set a sensible label spacing if the user hasn't already done so
if colorbar_label_skip is None:
if colorbar_orientation == 'horizontal':
nchars = 0
for lev in clevs:
nchars = nchars + len(str(lev))
colorbar_label_skip = int(nchars / 80 + 1)
if plotvars.columns > 1:
colorbar_label_skip = int(nchars * (plotvars.columns) / 80)
else:
colorbar_label_skip = 1
if colorbar_label_skip > 1:
if includes_zero:
# include zero in the colorbar labels
zero_pos = [i for i, mylev in enumerate(clevs) if mylev == 0][0]
cbar_labels = clevs[zero_pos]
i = zero_pos + colorbar_label_skip
while i <= len(clevs) - 1:
cbar_labels = np.append(cbar_labels, clevs[i])
i = i + colorbar_label_skip
i = zero_pos - colorbar_label_skip
if i >= 0:
while i >= 0:
cbar_labels = np.append(clevs[i], cbar_labels)
i = i - colorbar_label_skip
else:
cbar_labels = clevs[0]
i = int(colorbar_label_skip)
while i <= len(clevs) - 1:
cbar_labels = np.append(cbar_labels, clevs[i])
i = i + colorbar_label_skip
else:
cbar_labels = clevs
if colorbar_label_skip is None:
colorbar_label_skip = 1
# Make a list of strings of the colorbar levels for later labelling
clabels = []
for i in cbar_labels:
clabels.append(str(i))
if colorbar_label_skip > 1:
for skip in np.arange(colorbar_label_skip - 1):
clabels.append('')
if colorbar_labels is not None:
cbar_labels = colorbar_labels
else:
cbar_labels = clabels
# Turn off line_labels if the field is all the same
# Matplotlib 3.2.2 throws an error if there are no line labels
if np.nanmin(field) == np.nanmax(field):
line_labels = False
# Add mult to colorbar_title if used
if (colorbar_title is None):
colorbar_title = ''
if (mult != 0):
colorbar_title = colorbar_title + ' *10$^{' + str(mult) + '}$'
# Catch null title
if title is None:
title = ''
if plotvars.title is not None:
title = plotvars.title
# Set plot variables
title_fontsize = plotvars.title_fontsize
text_fontsize = plotvars.text_fontsize
if colorbar_fontsize is None:
colorbar_fontsize = plotvars.colorbar_fontsize
if colorbar_fontweight is None:
colorbar_fontweight = plotvars.colorbar_fontweight
continent_thickness = plotvars.continent_thickness
continent_color = plotvars.continent_color
continent_linestyle = plotvars.continent_linestyle
land_color = plotvars.land_color
ocean_color = plotvars.ocean_color
lake_color = plotvars.lake_color
title_fontweight = plotvars.title_fontweight
if continent_thickness is None:
continent_thickness = 1.5
if continent_color is None:
continent_color = 'k'
if continent_linestyle is None:
continent_linestyle = 'solid'
cb_orient = colorbar_orientation
# Retrieve any user defined axis labels
if xlabel == '' and plotvars.xlabel is not None:
xlabel = plotvars.xlabel
if ylabel == '' and plotvars.ylabel is not None:
ylabel = plotvars.ylabel
if xticks is None and plotvars.xticks is not None:
xticks = plotvars.xticks
if plotvars.xticklabels is not None:
xticklabels = plotvars.xticklabels
else:
xticklabels = list(map(str, xticks))
if yticks is None and plotvars.yticks is not None:
yticks = plotvars.yticks
if plotvars.yticklabels is not None:
yticklabels = plotvars.yticklabels
else:
yticklabels = list(map(str, yticks))
# Calculate a set of dimension titles if requested
if titles:
title_dims = ''
if not colorbar:
title_dims = colorbar_title + '\n'
if isinstance(f, cf.Field):
xtitle, xunits = cf_var_name_titles(f, 'X')
if xtitle is not None:
xvalues = f.construct('X').array
if len(xvalues) > 1:
xvalue = ''
else:
xvalue = str(xvalues)
title_dims += 'x: ' + xtitle + ' ' + xvalue + ' ' + xunits + '\n'
ytitle, yunits = cf_var_name_titles(f, 'Y')
if ytitle is not None:
yvalues = f.construct('Y').array
if len(yvalues) > 1:
yvalue = ''
else:
yvalue = str(yvalues)
title_dims += 'y: ' + ytitle + ' ' + yvalue + ' ' + yunits + '\n'
ztitle, zunits = cf_var_name_titles(f, 'Z')
if ztitle is not None:
zvalues = f.construct('Z').array
if len(zvalues) > 1:
zvalue = ''
else:
zvalue = str(zvalues)
title_dims += 'z: ' + ztitle + ' ' + zvalue + ' ' + zunits + '\n'
ttitle, tunits = cf_var_name_titles(f, 'T')
if ttitle is not None:
tvalues = f.construct('T').dtarray
if len(tvalues) > 1:
tvalue = ''
else:
tvalue = str(cf.Data(tvalues).datetime_as_string)
title_dims += 't: ' + ttitle + ' ' + tvalue + '\n'
if len(f.cell_methods()) > 0:
title_dims += 'cell methods: '
i = 0
for method in f.cell_methods():
axis = f.cell_methods()[method].get_axes()[0]
dim = f.constructs.domain_axis_identity(axis)
collapse = f.cell_methods()[method].method
if i > 0:
title_dims += ', '
title_dims += dim + ': ' + collapse
##################
# Map contour plot
##################
if ptype == 1:
if verbose:
print('con - making a map plot')
# Open a new plot if necessary
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Set up mapping
lonrange = np.nanmax(x) - np.nanmin(x)
latrange = np.nanmax(y) - np.nanmin(y)
# Reset mapping
if plotvars.user_mapset == 0:
plotvars.lonmin = -180
plotvars.lonmax = 180
plotvars.latmin = -90
plotvars.latmax = 90
if (lonrange > 350 and latrange > 170) or plotvars.user_mapset == 1:
set_map()
else:
mapset(lonmin=np.nanmin(x), lonmax=np.nanmax(x),
latmin=np.nanmin(y), latmax=np.nanmax(y),
user_mapset=0, resolution=resolution_orig)
set_map()
mymap = plotvars.mymap
user_mapset = plotvars.user_mapset
lonrange = np.nanmax(x) - np.nanmin(x)
if not blockfill_ugrid:
if not ugrid:
if lonrange > 350 and np.ndim(y) == 1:
# Add cyclic information if missing.
if lonrange < 360:
# field, x = cartopy_util.add_cyclic_point(field, x)
field, x = add_cyclic(field, x)
lonrange = np.nanmax(x) - np.nanmin(x)
# cartopy line drawing fix
if x[-1] - x[0] == 360.0:
x[-1] = x[-1] + 0.001
# Shift grid if needed
if plotvars.lonmin < np.nanmin(x):
x = x - 360
if plotvars.lonmin > np.nanmax(x):
x = x + 360
else:
# Get the ugrid data within the map coordinates
# Matplotlib tricontour cannot plot missing data so we need to split
# the missing data into a separate field to deal with this
field_modified = deepcopy(field)
pts_nan = np.where(np.isnan(field_modified))
field_modified[pts_nan] = -1e30
field_ugrid, lons_ugrid, lats_ugrid = ugrid_window(field_modified, x, y)
#pts_real = np.where(np.isfinite(field_ugrid))
pts_real = np.where(field_ugrid > -1e29)
pts_nan = np.where(field_ugrid < -1e29)
field_ugrid_nan = []
lons_ugrid_nan = []
lats_ugrid_nan = []
if np.size(pts_nan) > 0:
field_ugrid_nan = deepcopy(field_ugrid)
lons_ugrid_nan = deepcopy(lons_ugrid)
lats_ugrid_nan = deepcopy(lats_ugrid)
field_ugrid_nan[:] = 0
field_ugrid_nan[pts_nan] = 1
field_ugrid_real = deepcopy(field_ugrid[pts_real])
lons_ugrid_real = deepcopy(lons_ugrid[pts_real])
lats_ugrid_real = deepcopy(lats_ugrid[pts_real])
if not ugrid:
# Flip latitudes and field if latitudes are in descending order
if np.ndim(y) == 1:
if y[0] > y[-1]:
y = y[::-1]
field = np.flipud(field)
# Plotting a sub-area of the grid produces stray contour labels
# in polar plots. Subsample the latitudes to remove this problem
if plotvars.proj == 'npstere' and np.ndim(y) == 1:
if not blockfill_ugrid:
if ugrid:
pts = np.where(lats_ugrid > plotvars.boundinglat - 5)
pts = np.array(pts).flatten()
lons_ugrid_real = lons_ugrid_real[pts]
lats_ugrid_real = lats_ugrid_real[pts]
field_ugrid_real = field_ugrid_real[pts]
else:
myypos = find_pos_in_array(vals=y, val=plotvars.boundinglat)
if myypos != -1:
y = y[myypos:]
field = field[myypos:, :]
if plotvars.proj == 'spstere' and np.ndim(y) == 1:
if not blockfill_ugrid:
if ugrid:
pts = np.where(lats_ugrid_real < plotvars.boundinglat + 5)
lons_ugrid_real = lons_ugrid_real[pts]
lats_ugrid_real = lats_ugrid_real[pts]
field_ugrid_real = field_ugrid_real[pts]
else:
myypos = find_pos_in_array(vals=y, val=plotvars.boundinglat, above=True)
if myypos != -1:
y = y[0:myypos + 1]
field = field[0:myypos + 1, :]
# Set the longitudes and latitudes
lons, lats = x, y
# Set the plot limits
if lonrange > 350:
gset(
xmin=plotvars.lonmin,
xmax=plotvars.lonmax,
ymin=plotvars.latmin,
ymax=plotvars.latmax,
user_gset=0)
else:
if user_mapset == 1:
gset(xmin=plotvars.lonmin,
xmax=plotvars.lonmax,
ymin=plotvars.latmin,
ymax=plotvars.latmax,
user_gset=0)
else:
gset(xmin=np.nanmin(lons),
xmax=np.nanmax(lons),
ymin=np.nanmin(lats),
ymax=np.nanmax(lats),
user_gset=0)
# Filled contours
if fill:
if verbose:
print('con - adding filled contours')
# Get colour scale for use in contouring
# If colour bar extensions are enabled then the colour map goes
# from 1 to ncols-2. The colours for the colour bar extensions
# are then changed on the colorbar and plot after the plot is made
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if (plotvars.levels_extend ==
'min' or plotvars.levels_extend == 'both'):
cmap.set_under(plotvars.cs[0])
if (plotvars.levels_extend ==
'max' or plotvars.levels_extend == 'both'):
cmap.set_over(plotvars.cs[-1])
# Filled colour contours
if not ugrid:
mymap.contourf(lons, lats, field * fmult, clevs,
extend=plotvars.levels_extend,
cmap=cmap, norm=plotvars.norm,
alpha=alpha, transform=ccrs.PlateCarree(),
zorder=zorder)
else:
if np.size(field_ugrid_real) > 0:
mymap.tricontourf(lons_ugrid_real, lats_ugrid_real, field_ugrid_real * fmult,
clevs, extend=plotvars.levels_extend,
cmap=cmap, norm=plotvars.norm,
alpha=alpha, transform=ccrs.PlateCarree(),
zorder=zorder)
# Block fill
if blockfill:
if verbose:
print('con - adding blockfill')
if isinstance(f, cf.Field):
if f.ref('grid_mapping_name:transverse_mercator', default=False):
# Special case for transverse mercator
bfill(f=f, clevs=clevs, alpha=alpha, zorder=zorder)
else:
if f.coord('X').has_bounds():
xpts = np.squeeze(f.coord('X').bounds.array[:, 0])
ypts = np.squeeze(f.coord('Y').bounds.array[:, 0])
# Add last longitude point
xpts = np.append(xpts, f.coord('X').bounds.array[-1, 1])
# Add last latitude point
ypts = np.append(ypts, f.coord('Y').bounds.array[-1, 1])
bfill(f=field_orig * fmult, x=xpts, y=ypts, clevs=clevs,
lonlat=1, bound=1, alpha=alpha, zorder=zorder)
else:
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=1, bound=0, alpha=alpha, zorder=zorder)
else:
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=1, bound=0, alpha=alpha, zorder=zorder)
# Block fill for ugrid
if blockfill_ugrid:
if verbose:
print('con - adding blockfill for UGRID')
bfill_ugrid(f=field_orig * fmult, face_lons=face_lons_array,
face_lats=face_lats_array,
face_connectivity=face_connectivity_array, clevs=clevs,
alpha=alpha, zorder=zorder)
# Contour lines and labels
if lines:
if verbose:
print('con - adding contour lines and labels')
if not ugrid:
cs = mymap.contour(lons, lats, field * fmult, clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles, alpha=alpha,
transform=ccrs.PlateCarree(), zorder=zorder)
else:
cs = mymap.tricontour(lons_ugrid_real, lats_ugrid_real, field_ugrid_real * fmult,
clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles, alpha=alpha,
transform=ccrs.PlateCarree(), zorder=zorder)
if line_labels:
nd = ndecs(clevs)
fmt = '%d'
if nd != 0:
fmt = '%1.' + str(nd) + 'f'
plotvars.plot.clabel(cs, levels=clevs, fmt=fmt, zorder=zorder, colors=colors,
fontsize=text_fontsize)
# Thick zero contour line
if zero_thick:
cs = mymap.contour(lons, lats, field * fmult, [-1e-32, 0],
colors=colors, linewidths=zero_thick,
linestyles=linestyles, alpha=alpha,
transform=ccrs.PlateCarree(), zorder=zorder)
# Add a ugrid mask if there is one
if ugrid and not blockfill_ugrid:
if np.size(field_ugrid_nan) > 0:
cmap_white = matplotlib.colors.ListedColormap([1.0, 1.0, 1.0])
                mymap.tricontourf(lons_ugrid_nan, lats_ugrid_nan, field_ugrid_nan, [0.5, 1.5],
extend='neither',
cmap=cmap_white, norm=plotvars.norm,
alpha=alpha, transform=ccrs.PlateCarree(),
zorder=zorder)
# Axes
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=verbose)
# Coastlines and features
feature = cfeature.NaturalEarthFeature(name='land',
category='physical',
scale=plotvars.resolution,
facecolor='none')
mymap.add_feature(feature,
edgecolor=continent_color,
linewidth=continent_thickness,
linestyle=continent_linestyle,
zorder=zorder)
if ocean_color is not None:
mymap.add_feature(cfeature.OCEAN, edgecolor='face', facecolor=ocean_color,
zorder=999)
if land_color is not None:
mymap.add_feature(cfeature.LAND, edgecolor='face', facecolor=land_color,
zorder=999)
if lake_color is not None:
mymap.add_feature(cfeature.LAKES, edgecolor='face', facecolor=lake_color,
zorder=999)
# Title
if title != '':
map_title(title)
# Titles for dimensions
if titles:
dim_titles(title_dims, dims=True)
# Color bar
if colorbar:
cbar(labels=cbar_labels, orientation=cb_orient, position=colorbar_position,
shrink=colorbar_shrink, title=colorbar_title, fontsize=colorbar_fontsize,
fontweight=colorbar_fontweight, text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up, drawedges=colorbar_drawedges,
fraction=colorbar_fraction, thick=colorbar_thick,
anchor=colorbar_anchor, levs=clevs, verbose=verbose)
# Reset plot limits if not a user plot
if plotvars.user_gset == 0:
gset()
################################################
# Latitude, longitude or time vs Z contour plots
################################################
if ptype == 2 or ptype == 3 or ptype == 7:
if verbose:
if ptype == 2:
print('con - making a latitude-pressure plot')
if ptype == 3:
print('con - making a longitude-pressure plot')
if ptype == 7:
print('con - making a time-pressure plot')
# Work out which way is up
positive = None
if isinstance(f, cf.Field):
if hasattr(f.construct('Z'), 'positive'):
positive = f.construct('Z').positive
else:
errstr = "\ncf-plot - data error \n"
errstr += "data needs a vertical coordinate direction"
errstr += " as required in CF data conventions"
errstr += "\nMaking a contour plot assuming positive is down\n\n"
errstr += "If this is incorrect the data needs to be modified to \n"
errstr += "include a correct value for the direction attribute\n"
errstr += "such as in f.coord(\'Z\').postive=\'down\'"
errstr += "\n\n"
print(errstr)
positive = 'down'
else:
positive = 'down'
if 'theta' in ylabel.split(' '):
positive = 'up'
if 'height' in ylabel.split(' '):
positive = 'up'
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Use gset parameter of ylog if user has set this
if plotvars.ylog is True or plotvars.ylog == 1:
ylog = True
# Set plot limits
user_gset = plotvars.user_gset
if user_gset == 0:
# Program selected data plot limits
xmin = np.nanmin(x)
if xmin < -80 and xmin >= -90:
xmin = -90
xmax = np.nanmax(x)
if xmax > 80 and xmax <= 90:
xmax = 90
if positive == 'down':
ymin = np.nanmax(y)
ymax = np.nanmin(y)
if ymax < 10:
ymax = 0
else:
ymin = np.nanmin(y)
ymax = np.nanmax(y)
else:
# Use user specified plot limits
xmin = plotvars.xmin
xmax = plotvars.xmax
ymin = plotvars.ymin
ymax = plotvars.ymax
ystep = 100
myrange = abs(ymax - ymin)
if myrange < 1:
ystep = abs(ymax - ymin)/10.
if abs(ymax - ymin) > 1:
ystep = 1
if abs(ymax - ymin) > 10:
ystep = 10
if abs(ymax - ymin) > 100:
ystep = 100
if abs(ymax - ymin) > 1000:
ystep = 200
if abs(ymax - ymin) > 2000:
ystep = 500
if abs(ymax - ymin) > 5000:
ystep = 1000
if abs(ymax - ymin) > 15000:
ystep = 5000
# Work out ticks and tick labels
if ylog is False or ylog == 0:
heightticks = gvals(dmin=min(ymin, ymax),
dmax=max(ymin, ymax),
mystep=ystep, mod=False)[0]
if myrange < 1 and myrange > 0.1:
heightticks = np.arange(10)/10.0
else:
heightticks = []
for tick in 1000, 100, 10, 1:
if tick >= min(ymin, ymax) and tick <= max(ymin, ymax):
heightticks.append(tick)
heightlabels = heightticks
if axes:
if xaxis:
if xticks is not None:
if xticklabels is None:
xticklabels = xticks
else:
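                    # A single tick placed well outside the plot range effectively
                    # suppresses the axis ticks and labels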
xticks = [100000000]
xticklabels = xticks
xlabel = ''
if yaxis:
if yticks is not None:
heightticks = yticks
heightlabels = yticks
if yticklabels is not None:
heightlabels = yticklabels
else:
heightticks = [100000000]
ylabel = ''
else:
xticks = [100000000]
xticklabels = xticks
heightticks = [100000000]
heightlabels = heightticks
xlabel = ''
ylabel = ''
if yticks is None:
yticks = heightticks
yticklabels = heightlabels
# Time - height contour plot
if ptype == 7:
if isinstance(f, cf.Field):
if plotvars.user_gset == 0:
tmin = f.construct('T').dtarray[0]
tmax = f.construct('T').dtarray[-1]
else:
# Use user set values if present
tmin = plotvars.xmin
tmax = plotvars.xmax
ref_time = f.construct('T').units
ref_calendar = f.construct('T').calendar
time_units = cf.Units(ref_time, ref_calendar)
t = cf.Data(cf.dt(tmin), units=time_units)
xmin = t.array
t = cf.Data(cf.dt(tmax), units=time_units)
xmax = t.array
if xticks is None and xaxis:
if ptype == 2:
xticks, xticklabels = mapaxis(min=xmin, max=xmax, type=2) # lat-pressure
if ptype == 3:
xticks, xticklabels = mapaxis(min=xmin, max=xmax, type=1) # lon-pressure
if ptype == 7:
# time-pressure
if isinstance(f, cf.Field):
# Change plotvars.xmin and plotvars.xmax from a date string
# to a number
ref_time = f.construct('T').units
ref_calendar = f.construct('T').calendar
time_units = cf.Units(ref_time, ref_calendar)
t = cf.Data(cf.dt(tmin), units=time_units)
xmin = t.array
t = cf.Data(cf.dt(tmax), units=time_units)
xmax = t.array
taxis = cf.Data(
[cf.dt(tmin), cf.dt(tmax)], units=time_units)
time_ticks, time_labels, tlabel = timeaxis(taxis)
# Use user supplied labels if present
if user_xlabel is None:
xlabel = tlabel
if xticks is None:
xticks = time_ticks
if xticklabels is None:
xticklabels = time_labels
else:
errstr = '\nNot a CF field\nPlease use ptype=0 and '
errstr = errstr + 'specify axis labels manually\n'
raise Warning(errstr)
# Set plot limits
if ylog is False or ylog == 0:
gset(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
user_gset=user_gset)
else:
if ymax == 0:
ymax = 1 # Avoid zero in a log plot
gset(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
ylog=True, user_gset=user_gset)
# Label axes
axes_plot(xticks=xticks, xticklabels=xticklabels, yticks=heightticks,
yticklabels=heightlabels, xlabel=xlabel, ylabel=ylabel)
# Get colour scale for use in contouring
# If colour bar extensions are enabled then the colour map goes
# from 1 to ncols-2. The colours for the colour bar extensions are
# then changed on the colorbar and plot after the plot is made
colmap = cscale_get_map()
# Filled contours
if fill:
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if (plotvars.levels_extend ==
'min' or plotvars.levels_extend == 'both'):
cmap.set_under(plotvars.cs[0])
if (plotvars.levels_extend ==
'max' or plotvars.levels_extend == 'both'):
cmap.set_over(plotvars.cs[-1])
plotvars.plot.contourf(x, y, field * fmult, clevs,
extend=plotvars.levels_extend,
cmap=cmap,
norm=plotvars.norm, alpha=alpha,
zorder=zorder)
# Block fill
if blockfill:
if isinstance(f, cf.Field):
hasbounds = True
if ptype == 2:
if f.coord('Y').has_bounds():
xpts = np.squeeze(f.coord('Y').bounds.array)[:, 0]
xpts = np.append(xpts, f.coord('Y').bounds.array[-1, 1])
ypts = np.squeeze(f.coord('Z').bounds.array)[:, 0]
ypts = np.append(ypts, f.coord('Z').bounds.array[-1, 1])
else:
hasbounds = False
if ptype == 3:
if f.coord('X').has_bounds():
xpts = np.squeeze(f.coord('X').bounds.array)[:, 0]
xpts = np.append(xpts, f.coord('X').bounds.array[-1, 1])
ypts = np.squeeze(f.coord('Z').bounds.array)[:, 0]
                        ypts = np.append(ypts, f.coord('Z').bounds.array[-1, 1])
else:
hasbounds = False
if ptype == 7:
if f.coord('T').has_bounds():
xpts = np.squeeze(f.coord('T').bounds.array)[:, 0]
xpts = np.append(xpts, f.coord('T').bounds.array[-1, 1])
ypts = np.squeeze(f.coord('Z').bounds.array)[:, 0]
                        ypts = np.append(ypts, f.coord('Z').bounds.array[-1, 1])
else:
hasbounds = False
if hasbounds:
bfill(f=field_orig * fmult, x=xpts, y=ypts, clevs=clevs,
lonlat=0, bound=1, alpha=alpha, zorder=zorder)
else:
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=0, bound=0, alpha=alpha, zorder=zorder)
else:
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=0, bound=0, alpha=alpha, zorder=zorder)
# Contour lines and labels
if lines:
cs = plotvars.plot.contour(
x, y, field * fmult, clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles, zorder=zorder)
if line_labels:
nd = ndecs(clevs)
fmt = '%d'
if nd != 0:
fmt = '%1.' + str(nd) + 'f'
                plotvars.plot.clabel(cs, fmt=fmt, colors=colors, zorder=zorder,
fontsize=text_fontsize)
# Thick zero contour line
if zero_thick:
cs = plotvars.plot.contour(x, y, field * fmult,
[-1e-32, 0], colors=colors,
linewidths=zero_thick,
linestyles=linestyles, alpha=alpha,
zorder=zorder)
# Titles for dimensions
if titles:
dim_titles(title_dims, dims=True)
# Color bar
if colorbar:
cbar(labels=cbar_labels,
orientation=cb_orient,
position=colorbar_position,
shrink=colorbar_shrink,
title=colorbar_title,
fontsize=colorbar_fontsize,
fontweight=colorbar_fontweight,
text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up,
drawedges=colorbar_drawedges,
fraction=colorbar_fraction,
thick=colorbar_thick,
levs=clevs,
anchor=colorbar_anchor,
verbose=verbose)
# Title
plotvars.plot.set_title(title, y=1.03, fontsize=title_fontsize,
fontweight=title_fontweight)
# Reset plot limits to those supplied by the user
if user_gset == 1 and ptype == 7:
gset(xmin=tmin, xmax=tmax, ymin=ymin, ymax=ymax,
user_gset=user_gset)
# reset plot limits if not a user plot
if plotvars.user_gset == 0:
gset()
########################
# Hovmuller contour plot
########################
if (ptype == 4 or ptype == 5):
if verbose:
print('con - making a Hovmuller plot')
yplotlabel = 'Time'
if ptype == 4:
xplotlabel = 'Longitude'
if ptype == 5:
xplotlabel = 'Latitude'
user_gset = plotvars.user_gset
# Time strings set to None initially
tmin = None
tmax = None
# Set plot limits
if all(val is not None for val in [
plotvars.xmin, plotvars.xmax, plotvars.ymin, plotvars.ymax]):
# Store time strings for later use
tmin = plotvars.ymin
tmax = plotvars.ymax
# Check data has CF attributes needed
        check_units = True
check_calendar = True
check_Units_reftime = True
if hasattr(f.construct('T'), 'units') is False:
check_units = False
if hasattr(f.construct('T'), 'calendar') is False:
check_calendar = False
if hasattr(f.construct('T'), 'Units'):
if not hasattr(f.construct('T').Units, 'reftime'):
check_Units_reftime = False
else:
check_Units_reftime = False
if False in [check_units, check_calendar, check_Units_reftime]:
print('\nThe required CF time information to make the plot')
            print('is not available. Please fix the following before')
print('trying to plot again')
if check_units is False:
print('Time axis missing: units')
if check_calendar is False:
print('Time axis missing: calendar')
if check_Units_reftime is False:
print('Time axis missing: Units.reftime')
return
# Change from date string in ymin and ymax to date as a float
ref_time = f.construct('T').units
ref_calendar = f.construct('T').calendar
time_units = cf.Units(ref_time, ref_calendar)
t = cf.Data(cf.dt(plotvars.ymin), units=time_units)
ymin = t.array
t = cf.Data(cf.dt(plotvars.ymax), units=time_units)
ymax = t.array
xmin = plotvars.xmin
xmax = plotvars.xmax
else:
xmin = np.nanmin(x)
xmax = np.nanmax(x)
ymin = np.nanmin(y)
ymax = np.nanmax(y)
# Extract axis labels
if len(f.constructs('T')) > 1:
errstr = "\n\nTime axis error - only one time axis allowed\n "
errstr += "Please list time axes with print(f.constructs())\n"
errstr += "and remove the ones not needed for a hovmuller plot \n"
errstr += "with f.del_construct('unwanted_time_axis')\n"
errstr += "before trying to plot again\n\n\n\n"
raise TypeError(errstr)
time_ticks, time_labels, ylabel = timeaxis(f.construct('T'))
if ptype == 4:
lonlatticks, lonlatlabels = mapaxis(min=xmin, max=xmax, type=1)
if ptype == 5:
lonlatticks, lonlatlabels = mapaxis(min=xmin, max=xmax, type=2)
if axes:
if xaxis:
if xticks is not None:
lonlatticks = xticks
lonlatlabels = xticks
if xticklabels is not None:
lonlatlabels = xticklabels
else:
lonlatticks = [100000000]
xlabel = ''
if yaxis:
if yticks is not None:
timeticks = yticks
timelabels = yticks
if yticklabels is not None:
timelabels = yticklabels
else:
timeticks = [100000000]
ylabel = ''
        else:
            lonlatticks = [100000000]
            timeticks = [100000000]
            xplotlabel = ''
            yplotlabel = ''
if user_xlabel is not None:
xplotlabel = user_xlabel
if user_ylabel is not None:
yplotlabel = user_ylabel
# Use the automatically generated labels if none are supplied
if ylabel is None:
yplotlabel = 'time'
if np.size(time_ticks) > 0:
timeticks = time_ticks
if np.size(time_labels) > 0:
timelabels = time_labels
# Swap axes if requested
if swap_axes:
x, y = y, x
field = np.flipud(np.rot90(field))
xmin, ymin = ymin, xmin
xmax, ymax = ymax, xmax
xplotlabel, yplotlabel = yplotlabel, xplotlabel
lonlatticks, timeticks = timeticks, lonlatticks
lonlatlabels, timelabels = timelabels, lonlatlabels
# Set plot limits
if plotvars.user_plot == 0:
gopen(user_plot=0)
gset(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, user_gset=user_gset)
# Revert to time strings if set
if all(val is not None for val in [tmin, tmax]):
plotvars.ymin = tmin
plotvars.ymax = tmax
# Set and label axes
axes_plot(xticks=lonlatticks, xticklabels=lonlatlabels,
yticks=timeticks, yticklabels=timelabels,
xlabel=xplotlabel, ylabel=yplotlabel)
# Get colour scale for use in contouring
# If colour bar extensions are enabled then the colour map goes
# from 1 to ncols-2. The colours for the colour bar extensions are
# then changed on the colorbar and plot after the plot is made
colmap = cscale_get_map()
# Filled contours
if fill:
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if (plotvars.levels_extend ==
'min' or plotvars.levels_extend == 'both'):
cmap.set_under(plotvars.cs[0])
if (plotvars.levels_extend ==
'max' or plotvars.levels_extend == 'both'):
cmap.set_over(plotvars.cs[-1])
plotvars.plot.contourf(x, y, field * fmult, clevs,
extend=plotvars.levels_extend,
cmap=cmap,
norm=plotvars.norm, alpha=alpha,
zorder=zorder)
# Block fill
if blockfill:
if isinstance(f, cf.Field):
if f.coord('X').has_bounds():
if ptype == 4:
xpts = np.squeeze(f.coord('X').bounds.array)[:, 0]
xpts = np.append(xpts, f.coord('X').bounds.array[-1, 1])
if ptype == 5:
xpts = np.squeeze(f.coord('Y').bounds.array)[:, 0]
xpts = np.append(xpts, f.coord('Y').bounds.array[-1, 1])
ypts = np.squeeze(f.coord('T').bounds.array)[:, 0]
ypts = np.append(ypts, f.coord('T').bounds.array[-1, 1])
if swap_axes:
xpts, ypts = ypts, xpts
field_orig = np.flipud(np.rot90(field_orig))
bfill(f=field_orig * fmult, x=xpts, y=ypts, clevs=clevs,
lonlat=0, bound=1, alpha=alpha, zorder=zorder)
else:
if swap_axes:
x_orig, y_orig = y_orig, x_orig
field_orig = np.flipud(np.rot90(field_orig))
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=0, bound=0, alpha=alpha, zorder=zorder)
else:
if swap_axes:
x_orig, y_orig = y_orig, x_orig
field_orig = np.flipud(np.rot90(field_orig))
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=0, bound=0, alpha=alpha, zorder=zorder)
# Contour lines and labels
if lines:
cs = plotvars.plot.contour(x, y, field * fmult, clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles, alpha=alpha)
if line_labels:
nd = ndecs(clevs)
fmt = '%d'
if nd != 0:
fmt = '%1.' + str(nd) + 'f'
plotvars.plot.clabel(cs, fmt=fmt, colors=colors, zorder=zorder,
fontsize=text_fontsize)
# Thick zero contour line
if zero_thick:
cs = plotvars.plot.contour(x, y, field * fmult,
[-1e-32, 0], colors=colors,
linewidths=zero_thick,
linestyles=linestyles, alpha=alpha,
zorder=zorder)
# Titles for dimensions
if titles:
dim_titles(title_dims, dims=True)
# Color bar
if colorbar:
cbar(labels=cbar_labels,
orientation=cb_orient,
position=colorbar_position,
shrink=colorbar_shrink,
title=colorbar_title,
fontsize=colorbar_fontsize,
fontweight=colorbar_fontweight,
text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up,
drawedges=colorbar_drawedges,
fraction=colorbar_fraction,
thick=colorbar_thick,
levs=clevs,
anchor=colorbar_anchor,
verbose=verbose)
# Title
plotvars.plot.set_title(
title,
y=1.03,
fontsize=title_fontsize,
fontweight=title_fontweight)
# reset plot limits if not a user plot
if user_gset == 0:
gset()
###########################
# Rotated pole contour plot
###########################
if ptype == 6:
# Extract x and y grid points
if plotvars.proj == 'cyl':
xpts = x
ypts = y
else:
xpts = np.arange(np.size(x))
ypts = np.arange(np.size(y))
if verbose:
print('con - making a rotated pole plot')
user_gset = plotvars.user_gset
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Set plot limits
if plotvars.proj == 'rotated':
plotargs = {}
gset(xmin=0, xmax=np.size(xpts) - 1,
ymin=0, ymax=np.size(ypts) - 1,
user_gset=user_gset)
plot = plotvars.plot
if plotvars.proj == 'cyl':
rotated_pole = f.ref('rotated_latitude_longitude')
xpole = rotated_pole['grid_north_pole_longitude']
ypole = rotated_pole['grid_north_pole_latitude']
transform = ccrs.RotatedPole(pole_latitude=ypole,
pole_longitude=xpole)
plotargs = {'transform': transform}
if plotvars.user_mapset == 1:
set_map()
else:
if np.ndim(xpts) == 1:
lonpts, latpts = np.meshgrid(xpts, ypts)
else:
lonpts = xpts
latpts = ypts
points = ccrs.PlateCarree().transform_points(transform, lonpts.flatten(),
latpts.flatten())
lons = np.array(points)[:, 0]
lats = np.array(points)[:, 1]
mapset(lonmin=np.min(lons), lonmax=np.max(lons),
latmin=np.min(lats), latmax=np.max(lats),
user_mapset=0, resolution=resolution_orig)
set_map()
plotargs = {'transform': transform}
plot = plotvars.mymap
# Get colour scale for use in contouring
# If colour bar extensions are enabled then the colour map goes
# from 1 to ncols-2. The colours for the colour bar extensions are
# then changed on the colorbar and plot after the plot is made
colmap = cscale_get_map()
# Filled contours
if fill:
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if (plotvars.levels_extend ==
'min' or plotvars.levels_extend == 'both'):
cmap.set_under(plotvars.cs[0])
if (plotvars.levels_extend ==
'max' or plotvars.levels_extend == 'both'):
cmap.set_over(plotvars.cs[-1])
plot.contourf(xpts, ypts, field * fmult, clevs,
extend=plotvars.levels_extend,
cmap=cmap,
norm=plotvars.norm, alpha=alpha,
zorder=zorder, **plotargs)
# Block fill
if blockfill:
bfill(f=field_orig * fmult,
x=xpts,
y=ypts,
clevs=clevs,
lonlat=0,
bound=0,
alpha=alpha,
zorder=zorder)
# Contour lines and labels
if lines:
cs = plot.contour(xpts, ypts, field * fmult, clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles,
zorder=zorder, **plotargs)
if line_labels:
nd = ndecs(clevs)
fmt = '%d'
if nd != 0:
fmt = '%1.' + str(nd) + 'f'
plot.clabel(cs, fmt=fmt, colors=colors, zorder=zorder,
fontsize=text_fontsize)
# Thick zero contour line
if zero_thick:
cs = plot.contour(xpts, ypts, field * fmult,
[-1e-32, 0], colors=colors,
linewidths=zero_thick,
linestyles=linestyles, alpha=alpha,
zorder=zorder, **plotargs)
# Titles for dimensions
if titles:
dim_titles(title_dims, dims=True)
# Color bar
if colorbar:
cbar(labels=cbar_labels,
orientation=cb_orient,
position=colorbar_position,
shrink=colorbar_shrink,
title=colorbar_title,
fontsize=colorbar_fontsize,
fontweight=colorbar_fontweight,
text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up,
drawedges=colorbar_drawedges,
fraction=colorbar_fraction,
thick=colorbar_thick,
levs=clevs,
anchor=colorbar_anchor,
verbose=verbose)
# Rotated grid axes
if axes:
if plotvars.proj == 'cyl':
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=verbose)
else:
rgaxes(xpole=xpole, ypole=ypole, xvec=x, yvec=y,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
axes=axes, xaxis=xaxis, yaxis=yaxis,
xlabel=xlabel, ylabel=ylabel)
if plotvars.proj == 'rotated':
# Remove Matplotlib default axis labels
axes_plot(xticks=[100000000], xticklabels=[''],
yticks=[100000000], yticklabels=[''],
xlabel='', ylabel='')
# Add title and coastlines for cylindrical projection
if plotvars.proj == 'cyl':
# Coastlines
feature = cfeature.NaturalEarthFeature(
name='land', category='physical',
scale=plotvars.resolution,
facecolor='none')
plotvars.mymap.add_feature(feature, edgecolor=continent_color,
linewidth=continent_thickness,
linestyle=continent_linestyle,
zorder=zorder)
# Title
if title != '':
map_title(title)
# Add title for native grid
if plotvars.proj == 'rotated':
# Title
plotvars.plot.set_title(title, y=1.03,
fontsize=title_fontsize,
fontweight=title_fontweight)
# reset plot limits if not a user plot
if plotvars.user_gset == 0:
gset()
#############
# Other plots
#############
if ptype == 0:
if verbose:
print('con - making an other plot')
if plotvars.user_plot == 0:
gopen(user_plot=0)
user_gset = plotvars.user_gset
# Set axis labels to None
xplotlabel = None
yplotlabel = None
cf_field = False
if f is not None:
if isinstance(f, cf.Field):
cf_field = True
# Work out axes if none are supplied
if any(val is None for val in [
plotvars.xmin, plotvars.xmax, plotvars.ymin, plotvars.ymax]):
xmin = np.nanmin(x)
xmax = np.nanmax(x)
ymin = np.nanmin(y)
ymax = np.nanmax(y)
else:
xmin = plotvars.xmin
xmax = plotvars.xmax
ymin = plotvars.ymin
ymax = plotvars.ymax
# Change from date string to a number if strings are passed
time_xstr = False
time_ystr = False
try:
float(xmin)
except Exception:
time_xstr = True
try:
float(ymin)
except Exception:
time_ystr = True
xaxisticks = None
yaxisticks = None
xtimeaxis = False
ytimeaxis = False
if cf_field and f.has_construct('T'):
if np.size(f.construct('T').array) > 1:
taxis = f.construct('T')
data_axes = f.get_data_axes()
count = 1
for d in data_axes:
i = f.constructs.domain_axis_identity(d)
try:
c = f.coordinate(i)
if np.size(c.array) > 1:
test_for_time_axis = False
sn = getattr(c, 'standard_name', 'NoName')
an = c.get_property('axis', 'NoName')
if (sn == 'time' or an == 'T'):
test_for_time_axis = True
if count == 1:
if test_for_time_axis:
ytimeaxis = True
elif count == 2:
if test_for_time_axis:
xtimeaxis = True
count += 1
except ValueError:
print("no sensible coordinates for this axis")
if time_xstr or time_ystr:
ref_time = f.construct('T').units
ref_calendar = f.construct('T').calendar
time_units = cf.Units(ref_time, ref_calendar)
if time_xstr:
t = cf.Data(cf.dt(xmin), units=time_units)
xmin = t.array
t = cf.Data(cf.dt(xmax), units=time_units)
xmax = t.array
taxis = cf.Data([xmin, xmax], units=time_units)
taxis.calendar = ref_calendar
if time_ystr:
t = cf.Data(cf.dt(ymin), units=time_units)
ymin = t.array
t = cf.Data(cf.dt(ymax), units=time_units)
ymax = t.array
taxis = cf.Data([ymin, ymax], units=time_units)
taxis.calendar = ref_calendar
if xtimeaxis:
xaxisticks, xaxislabels, xplotlabel = timeaxis(taxis)
if ytimeaxis:
yaxisticks, yaxislabels, yplotlabel = timeaxis(taxis)
if xaxisticks is None:
xaxisticks = gvals(dmin=xmin, dmax=xmax, mod=False)[0]
xaxislabels = xaxisticks
if yaxisticks is None:
yaxisticks = gvals(dmin=ymax, dmax=ymin, mod=False)[0]
yaxislabels = yaxisticks
if user_xlabel is not None:
xplotlabel = user_xlabel
else:
if xplotlabel is None:
xplotlabel = xlabel
if user_ylabel is not None:
yplotlabel = user_ylabel
else:
if yplotlabel is None:
yplotlabel = ylabel
# Draw axes
if axes:
if xaxis:
if xticks is not None:
xaxisticks = xticks
xaxislabels = xticks
if xticklabels is not None:
xaxislabels = xticklabels
else:
xaxisticks = [100000000]
xlabel = ''
if yaxis:
if yticks is not None:
yaxisticks = yticks
yaxislabels = yticks
if yticklabels is not None:
yaxislabels = yticklabels
else:
yaxisticks = [100000000]
ylabel = ''
else:
xaxisticks = [100000000]
yaxisticks = [100000000]
xlabel = ''
ylabel = ''
# Swap axes if requested
if swap_axes:
x, y = y, x
field = np.flipud(np.rot90(field))
xmin, ymin = ymin, xmin
xmax, ymax = ymax, xmax
xplotlabel, yplotlabel = yplotlabel, xplotlabel
xaxisticks, yaxisticks = yaxisticks, xaxisticks
xaxislabels, yaxislabels = yaxislabels, xaxislabels
# Set plot limits and set default plot labels
gset(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, user_gset=user_gset)
# Draw axes
axes_plot(xticks=xaxisticks, xticklabels=xaxislabels,
yticks=yaxisticks, yticklabels=yaxislabels,
xlabel=xplotlabel, ylabel=yplotlabel)
# Get colour scale for use in contouring
# If colour bar extensions are enabled then the colour map goes
# then from 1 to ncols-2. The colours for the colour bar extensions
# are changed on the colorbar and plot after the plot is made
colmap = cscale_get_map()
# Filled contours
if fill:
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if (plotvars.levels_extend ==
'min' or plotvars.levels_extend == 'both'):
cmap.set_under(plotvars.cs[0])
if (plotvars.levels_extend ==
'max' or plotvars.levels_extend == 'both'):
cmap.set_over(plotvars.cs[-1])
plotvars.plot.contourf(x, y, field * fmult, clevs,
extend=plotvars.levels_extend,
cmap=cmap,
norm=plotvars.norm, alpha=alpha,
zorder=zorder)
# Block fill
if blockfill:
bfill(f=field_orig * fmult, x=x_orig, y=y_orig, clevs=clevs,
lonlat=0, bound=0, alpha=alpha, zorder=zorder)
# Contour lines and labels
if lines:
cs = plotvars.plot.contour(x, y, field * fmult, clevs, colors=colors,
linewidths=linewidths, linestyles=linestyles,
zorder=zorder)
if line_labels:
nd = ndecs(clevs)
fmt = '%d'
if nd != 0:
fmt = '%1.' + str(nd) + 'f'
plotvars.plot.clabel(cs, fmt=fmt, colors=colors, zorder=zorder,
fontsize=text_fontsize)
# Thick zero contour line
if zero_thick:
cs = plotvars.plot.contour(x, y, field * fmult, [-1e-32, 0],
colors=colors,
linewidths=zero_thick,
linestyles=linestyles, alpha=alpha,
zorder=zorder)
# Titles for dimensions
if titles:
dim_titles(title_dims, dims=True)
# Color bar
if colorbar:
cbar(labels=cbar_labels,
orientation=cb_orient,
position=colorbar_position,
shrink=colorbar_shrink,
title=colorbar_title,
fontsize=colorbar_fontsize,
fontweight=colorbar_fontweight,
text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up,
drawedges=colorbar_drawedges,
fraction=colorbar_fraction,
thick=colorbar_thick,
levs=clevs,
anchor=colorbar_anchor,
verbose=verbose)
# Title
plotvars.plot.set_title(
title,
y=1.03,
fontsize=title_fontsize,
fontweight=title_fontweight)
# reset plot limits if not a user plot
if plotvars.user_gset == 0:
gset()
############################
# Set axis width if required
############################
if plotvars.axis_width is not None:
for axis in ['top', 'bottom', 'left', 'right']:
plotvars.plot.spines[axis].set_linewidth(plotvars.axis_width)
################################
    # Add a master title if requested
################################
if plotvars.master_title is not None:
location = plotvars.master_title_location
plotvars.master_plot.text(location[0], location[1],
plotvars.master_title,
horizontalalignment='center',
fontweight=plotvars.master_title_fontweight,
fontsize=plotvars.master_title_fontsize)
# Reset map resolution
if plotvars.user_mapset == 0:
mapset()
mapset(resolution=resolution_orig)
##################
# Save or view plot
##################
if plotvars.user_plot == 0:
if verbose:
print('con - saving or viewing plot')
np.seterr(**old_settings) # reset to default numpy error settings
gclose()
def mapset(lonmin=None, lonmax=None, latmin=None, latmax=None, proj='cyl',
boundinglat=0, lon_0=0, lat_0=40, resolution='110m', user_mapset=1,
aspect=None):
"""
| mapset sets the mapping parameters.
|
| lonmin=lonmin - minimum longitude
| lonmax=lonmax - maximum longitude
| latmin=latmin - minimum latitude
| latmax=latmax - maximum latitude
| proj=proj - 'cyl' for cylindrical projection. 'npstere' or 'spstere'
| for northern hemisphere or southern hemisphere polar stereographic.
    | ortho, merc, moll, robin and lcc are abbreviations for orthographic,
| mercator, mollweide, robinson and lambert conformal projections
| 'rotated' for contour plots on the native rotated grid.
|
| boundinglat=boundinglat - edge of the viewable latitudes in a
| stereographic plot
| lon_0=0 - longitude centre of desired map domain in polar
    | stereographic and orthographic plots
    | lat_0=40 - latitude centre of desired map domain in orthographic plots
| resolution='110m' - the map resolution - can be one of '110m',
| '50m' or '10m'. '50m' means 1:50,000,000 and not 50 metre.
| user_mapset=user_mapset - variable to indicate whether a user call
| to mapset has been made.
|
    | The default map plotting projection is the cylindrical equidistant
| projection from -180 to 180 in longitude and -90 to 90 in latitude.
| To change the map view in this projection to over the United Kingdom,
| for example, you would use
| mapset(lonmin=-6, lonmax=3, latmin=50, latmax=60)
| or
| mapset(-6, 3, 50, 60)
|
| The limits are -360 to 720 in longitude so to look at the equatorial
| Pacific you could use
| mapset(lonmin=90, lonmax=300, latmin=-30, latmax=30)
| or
| mapset(lonmin=-270, lonmax=-60, latmin=-30, latmax=30)
|
| The default setting for the cylindrical projection is for 1 degree of
| longitude to have the same size as one degree of latitude. When plotting
    | a smaller map, setting aspect='auto' turns this off and the map fills the
    | plot area. Setting aspect to a number stretches the plot so that the
    | height is that number times the width. aspect=1 is the same as aspect='equal'.
|
| The proj parameter accepts 'npstere' and 'spstere' for northern
| hemisphere or southern hemisphere polar stereographic projections.
| In addition to these the boundinglat parameter sets the edge of the
| viewable latitudes and lon_0 sets the centre of desired map domain.
|
|
|
| Map settings are persistent until a new call to mapset is made. To
| reset to the default map settings use mapset().
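    |
    | Illustrative sketch (assumes cf-plot has been imported as cfp):
    | cfp.mapset(proj='npstere', boundinglat=30, lon_0=0)   # Arctic view
    | cfp.mapset()                                          # back to the default global view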
:Returns:
None
"""
# Set the continent resolution
plotvars.resolution = resolution
if all(val is None for val in [
lonmin, lonmax, latmin, latmax, aspect]) and proj == 'cyl':
plotvars.lonmin = -180
plotvars.lonmax = 180
plotvars.latmin = -90
plotvars.latmax = 90
plotvars.proj = 'cyl'
plotvars.user_mapset = 0
plotvars.aspect = 'equal'
plotvars.plot_xmin = None
plotvars.plot_xmax = None
plotvars.plot_ymin = None
plotvars.plot_ymax = None
return
# Set the aspect ratio
if aspect is None:
aspect = 'equal'
plotvars.aspect = aspect
if lonmin is None:
lonmin = -180
if lonmax is None:
lonmax = 180
if latmin is None:
latmin = -90
if proj == 'merc':
latmin = -80
if latmax is None:
latmax = 90
if proj == 'merc':
latmax = 80
if proj == 'moll':
lonmin = lon_0 - 180
lonmax = lon_0 + 180
plotvars.lonmin = lonmin
plotvars.lonmax = lonmax
plotvars.latmin = latmin
plotvars.latmax = latmax
plotvars.proj = proj
plotvars.boundinglat = boundinglat
plotvars.lon_0 = lon_0
plotvars.lat_0 = lat_0
plotvars.user_mapset = user_mapset
def levs(min=None, max=None, step=None, manual=None, extend='both'):
"""
| The levs command manually sets the contour levels.
| min=min - minimum level
| max=max - maximum level
| step=step - step between levels
| manual= manual - set levels manually
| extend='neither', 'both', 'min', or 'max' - colour bar limit extensions
| Use the levs command when a predefined set of levels is required. The
| min, max and step parameters can be used to define a set of levels.
| These can take integer or floating point numbers. If just the step is
| defined then cf-plot will internally try to define a reasonable set
| of levels.
| If colour filled contours are plotted then the default is to extend
| the minimum and maximum contours coloured for out of range values
| - extend='both'.
| Once a user call is made to levs the levels are persistent.
| i.e. the next plot will use the same set of levels.
| Use levs() to reset to undefined levels.
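    |
    | Illustrative sketch (assumes cf-plot has been imported as cfp):
    | cfp.levs(min=250, max=300, step=5)         # evenly spaced levels
    | cfp.levs(manual=[1, 2, 5, 10, 20, 50])     # explicit, unevenly spaced levels
    | cfp.levs()                                 # reset to automatic levels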
:Returns:
None
"""
if all(val is None for val in [min, max, step, manual]):
plotvars.levels = None
plotvars.levels_min = None
plotvars.levels_max = None
plotvars.levels_step = None
plotvars.levels_extend = 'both'
plotvars.norm = None
plotvars.user_levs = 0
return
if manual is not None:
plotvars.levels = np.array(manual)
plotvars.levels_min = None
plotvars.levels_max = None
plotvars.levels_step = None
# Set the normalization object as we are using potentially unevenly
# spaced levels
ncolors = np.size(plotvars.levels)
if extend == 'both' or extend == 'max':
ncolors = ncolors - 1
plotvars.norm = matplotlib.colors.BoundaryNorm(
boundaries=plotvars.levels, ncolors=ncolors)
plotvars.user_levs = 1
else:
if all(val is not None for val in [min, max, step]):
plotvars.levels_min = min
plotvars.levels_max = max
plotvars.levels_step = step
plotvars.norm = None
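            # The tiny lstep offset below ensures that 'max' itself is included in
            # np.arange despite floating point rounding; scaling by 1e10 and back
            # strips spurious decimal places from the generated levels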
if all(isinstance(item, int) for item in [min, max, step]):
lstep = step * 1e-10
levs = (np.arange(min, max + lstep, step, dtype=np.float64))
levs = ((levs * 1e10).astype(np.int64)).astype(np.float64)
            levs = (levs / 1e10).astype(int)
plotvars.levels = levs
else:
lstep = step * 1e-10
levs = np.arange(min, max + lstep, step, dtype=np.float64)
levs = (levs * 1e10).astype(np.int64).astype(np.float64)
levs = levs / 1e10
plotvars.levels = levs
plotvars.user_levs = 1
# Check for spurious decimal places due to numeric representation
# and fix if found
for pt in np.arange(np.size(plotvars.levels)):
ndecs = str(plotvars.levels[pt])[::-1].find('.')
if ndecs > 7:
plotvars.levels[pt] = round(plotvars.levels[pt], 7)
# If step only is set then reset user_levs to zero
if step is not None and all(val is None for val in [min, max]):
plotvars.user_levs = 0
plotvars.levels = None
plotvars.levels_step = step
# Check extend has a proper value
if extend not in ['neither', 'min', 'max', 'both']:
errstr = "\n\n extend must be one of 'neither', 'min', 'max', 'both'\n"
raise TypeError(errstr)
plotvars.levels_extend = extend
def mapaxis(min=None, max=None, type=None):
"""
| mapaxis is used to work out a sensible set of longitude and latitude
| tick marks and labels. This is an internal routine and is not used
| by the user.
| min=None - minimum axis value
| max=None - maximum axis value
| type=None - 1 = longitude, 2 = latitude
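    |
    | Illustrative example (degree symbols are appended when plotvars.degsym is True):
    | mapaxis(min=0, max=90, type=1) returns ticks [0, 10, ..., 90] with
    | labels ['0', '10E', ..., '90E']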
:Returns:
        longitude/latitude ticks and longitude/latitude tick labels
|
|
|
|
|
|
|
"""
degsym = ''
if plotvars.degsym:
degsym = r'$\degree$'
if type == 1:
lonmin = min
lonmax = max
lonrange = lonmax - lonmin
lonstep = 60
if lonrange <= 180:
lonstep = 30
if lonrange <= 90:
lonstep = 10
if lonrange <= 30:
lonstep = 5
if lonrange <= 10:
lonstep = 2
if lonrange <= 5:
lonstep = 1
lons = np.arange(-720, 720 + lonstep, lonstep)
lonticks = []
for lon in lons:
if lon >= lonmin and lon <= lonmax:
lonticks.append(lon)
lonlabels = []
for lon in lonticks:
lon2 = np.mod(lon + 180, 360) - 180
if lon2 < 0 and lon2 > -180:
if lon != 180:
lonlabels.append(str(abs(lon2)) + degsym + 'W')
if lon2 > 0 and lon2 <= 180:
lonlabels.append(str(lon2) + degsym + 'E')
if lon2 == 0:
lonlabels.append('0' + degsym)
if lon == 180 or lon == -180:
lonlabels.append('180' + degsym)
return(lonticks, lonlabels)
if type == 2:
latmin = min
latmax = max
latrange = latmax - latmin
latstep = 30
if latrange <= 90:
latstep = 10
if latrange <= 30:
latstep = 5
if latrange <= 10:
latstep = 2
if latrange <= 5:
latstep = 1
lats = np.arange(-90, 90 + latstep, latstep)
latticks = []
for lat in lats:
if lat >= latmin and lat <= latmax:
latticks.append(lat)
latlabels = []
for lat in latticks:
if lat < 0:
latlabels.append(str(abs(lat)) + degsym + 'S')
if lat > 0:
latlabels.append(str(lat) + degsym + 'N')
if lat == 0:
latlabels.append('0' + degsym)
return(latticks, latlabels)
def timeaxis(dtimes=None):
"""
| timeaxis is used to work out a sensible set of time labels and tick
    | marks given a time span. This is an internal routine and is not used
| by the user.
| dtimes=None - data times as a CF variable
:Returns:
time ticks and labels
|
|
|
|
|
|
|
"""
time_units = dtimes.Units
time_ticks = []
time_labels = []
axis_label = 'Time'
yearmin = min(dtimes.year.array)
yearmax = max(dtimes.year.array)
tmin = min(dtimes.dtarray)
tmax = max(dtimes.dtarray)
if hasattr(dtimes, 'calendar'):
calendar = dtimes.calendar
else:
calendar = 'standard'
if plotvars.user_gset != 0:
if isinstance(plotvars.xmin, str):
t = cf.Data(cf.dt(plotvars.xmin), units=time_units, calendar=calendar)
yearmin = int(t.year)
t = cf.Data(cf.dt(plotvars.xmax), units=time_units, calendar=calendar)
yearmax = int(t.year)
tmin = cf.dt(plotvars.xmin, units=time_units, calendar=calendar)
tmax = cf.dt(plotvars.xmax, units=time_units, calendar=calendar)
if isinstance(plotvars.ymin, str):
t = cf.Data(cf.dt(plotvars.ymin), units=time_units, calendar=calendar)
yearmin = int(t.year)
t = cf.Data(cf.dt(plotvars.ymax), units=time_units, calendar=calendar)
yearmax = int(t.year)
tmin = cf.dt(plotvars.ymin, calendar=calendar)
tmax = cf.dt(plotvars.ymax, calendar=calendar)
# Years
span = yearmax - yearmin
if span > 4 and span < 3000:
axis_label = 'Time (year)'
tvals = []
if span <= 15:
step = 1
if span > 15:
step = 2
if span > 30:
step = 5
if span > 60:
step = 10
if span > 160:
step = 20
if span > 300:
step = 50
if span > 600:
step = 100
if span > 1300:
step = 200
if plotvars.tspace_year is not None:
step = plotvars.tspace_year
years = np.arange(yearmax / step + 2) * step
tvals = years[np.where((years >= yearmin) & (years <= yearmax))]
# Catch tvals if not properly defined and use gvals to generate some
# year tick marks
if np.size(tvals) < 2:
tvals = gvals(dmin=yearmin, dmax=yearmax)[0]
for year in tvals:
time_ticks.append(np.min(
(cf.Data(cf.dt(str(int(year)) + '-01-01 00:00:00'),
units=time_units, calendar=calendar).array)))
time_labels.append(str(int(year)))
# Months
if yearmax - yearmin <= 4:
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
# Check number of labels with 1 month steps
tsteps = 0
for year in np.arange(yearmax - yearmin + 1) + yearmin:
for month in np.arange(12):
mytime = cf.dt(str(year) + '-' +
str(month + 1) + '-01 00:00:00', calendar=calendar)
if mytime >= tmin and mytime <= tmax:
tsteps = tsteps + 1
if tsteps < 17:
mvals = np.arange(12)
if tsteps >= 17:
mvals = np.arange(4) * 3
for year in np.arange(yearmax - yearmin + 1) + yearmin:
for month in mvals:
mytime = cf.dt(str(year) + '-' +
str(month + 1) + '-01 00:00:00', calendar=calendar)
if mytime >= tmin and mytime <= tmax:
time_ticks.append(
np.min((cf.Data(mytime, units=time_units, calendar=calendar).array)))
time_labels.append(
str(months[month]) + ' ' + str(int(year)))
# Days and hours
if np.size(time_ticks) <= 2:
myday = cf.dt(int(tmin.year), int(tmin.month), int(tmin.day), calendar=calendar)
not_found = 0
hour_counter = 0
span = 0
while not_found <= 48:
mydate = cf.Data(myday, dtimes.Units) + \
cf.Data(hour_counter, 'hour')
if mydate >= tmin and mydate <= tmax:
span = span + 1
else:
not_found = not_found + 1
hour_counter = hour_counter + 1
        step = 1
        if span > 13:
            step = 4
if span > 25:
step = 6
if span > 100:
step = 12
if span > 200:
step = 24
if span > 400:
step = 48
if span > 800:
step = 96
if plotvars.tspace_hour is not None:
step = plotvars.tspace_hour
if plotvars.tspace_day is not None:
step = plotvars.tspace_day * 24
not_found = 0
hour_counter = 0
axis_label = 'Time (hour)'
if span >= 24:
axis_label = 'Time'
time_ticks = []
time_labels = []
while not_found <= 48:
mytime = cf.Data(myday, dtimes.Units) + cf.Data(hour_counter, 'hour')
if mytime >= tmin and mytime <= tmax:
time_ticks.append(np.min(mytime.array))
label = str(mytime.year) + '-' + str(mytime.month) + '-' + str(mytime.day)
if (hour_counter/24 != int(hour_counter/24)):
label += ' ' + str(mytime.hour) + ':00:00'
time_labels.append(label)
else:
not_found = not_found + 1
hour_counter = hour_counter + step
return(time_ticks, time_labels, axis_label)
def ndecs(data=None):
"""
| ndecs finds the number of decimal places in an array. Needed to make the
| colour bar match the contour line labelling.
| data=data - input array of values
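    |
    | Illustrative example: ndecs([1, 2.5, 3.75]) returns 2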
:Returns:
    | maximum number of decimal places
|
|
|
|
|
|
|
|
"""
maxdecs = 0
for i in range(len(data)):
number = data[i]
a = str(number).split('.')
if np.size(a) == 2:
number_decs = len(a[1])
if number_decs > maxdecs:
maxdecs = number_decs
return maxdecs
def axes(xticks=None, xticklabels=None, yticks=None, yticklabels=None,
xstep=None, ystep=None, xlabel=None, ylabel=None, title=None):
"""
| axes is a function to set axes plotting parameters. The xstep and ystep
| parameters are used to label the axes starting at the left hand side and
| bottom of the plot respectively. For tighter control over labelling use
| xticks, yticks to specify the tick positions and xticklabels,
| yticklabels to specify the associated labels.
| xstep=xstep - x axis step
| ystep=ystep - y axis step
| xlabel=xlabel - label for the x-axis
| ylabel=ylabel - label for the y-axis
| xticks=xticks - values for x ticks
| xticklabels=xticklabels - labels for x tick marks
| yticks=yticks - values for y ticks
| yticklabels=yticklabels - labels for y tick marks
| title=None - set title
|
| Use axes() to reset all the axes plotting attributes to the default.
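    |
    | Illustrative sketch (assumes cf-plot has been imported as cfp):
    | cfp.axes(xstep=60, ystep=30)    # label the axes every 60 and 30 degrees
    | cfp.axes()                      # reset the axis settings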
:Returns:
None
"""
if all(val is None for val in [
xticks, yticks, xticklabels, yticklabels, xstep, ystep, xlabel,
ylabel, title]):
plotvars.xticks = None
plotvars.yticks = None
plotvars.xticklabels = None
plotvars.yticklabels = None
plotvars.xstep = None
plotvars.ystep = None
plotvars.xlabel = None
plotvars.ylabel = None
plotvars.title = None
return
plotvars.xticks = xticks
plotvars.yticks = yticks
plotvars.xticklabels = xticklabels
plotvars.yticklabels = yticklabels
plotvars.xstep = xstep
plotvars.ystep = ystep
plotvars.xlabel = xlabel
plotvars.ylabel = ylabel
plotvars.title = title
def axes_plot(xticks=None, xticklabels=None, yticks=None, yticklabels=None,
xlabel=None, ylabel=None, title=None):
"""
| axes_plot is a system function to specify axes plotting parameters.
| Use xticks, yticks to specify the tick positions and xticklabels,
| yticklabels to specify the associated labels.
|
| xticks=xticks - values for x ticks
| xticklabels=xticklabels - labels for x tick marks
| yticks=yticks - values for y ticks
| yticklabels=yticklabels - labels for y tick marks
| xlabel=xlabel - label for the x-axis
| ylabel=ylabel - label for the y-axis
| title=None - set title
|
:Returns:
None
"""
if plotvars.title is not None:
title = plotvars.title
title_fontsize = plotvars.title_fontsize
text_fontsize = plotvars.text_fontsize
axis_label_fontsize = plotvars.axis_label_fontsize
if title_fontsize is None:
title_fontsize = 15
if text_fontsize is None:
text_fontsize = 11
if axis_label_fontsize is None:
axis_label_fontsize = 11
axis_label_fontweight = plotvars.axis_label_fontweight
title_fontweight = plotvars.title_fontweight
if (plotvars.plot_type == 1 or plotvars.plot_type == 6) and plotvars.proj == 'cyl':
plot = plotvars.mymap
lon_mid = plotvars.lonmin + (plotvars.lonmax - plotvars.lonmin) / 2.0
plotargs = {'crs': ccrs.PlateCarree()}
else:
plot = plotvars.plot
plotargs = {}
if xlabel is not None:
plotvars.plot.set_xlabel(xlabel, fontsize=axis_label_fontsize,
fontweight=axis_label_fontweight)
if ylabel is not None:
plotvars.plot.set_ylabel(ylabel, fontsize=axis_label_fontsize,
fontweight=axis_label_fontweight)
xticklen = (plotvars.lonmax - plotvars.lonmin)*0.007
yticklen = (plotvars.latmax-plotvars.latmin)*0.014
# set the plot
if (plotvars.plot_type == 1 or plotvars.plot_type == 6):
this_plot = plotvars.mymap
else:
this_plot = plotvars.plot
if plotvars.plot_type == 6 and plotvars.proj == 'rotated':
this_plot = plotvars.plot
# get the plot bounds
l, b, w, h = this_plot.get_position().bounds
lonrange = plotvars.lonmax - plotvars.lonmin
lon_mid = plotvars.lonmin + (plotvars.lonmax - plotvars.lonmin) / 2.0
# Set the ticks and tick labels
if xticks is not None:
# fudge min and max longitude tick positions or the labels wrap
xticks_new = xticks
if lonrange >= 360:
xticks_new[0] = xticks_new[0] + 0.01
xticks_new[-1] = xticks_new[-1] - 0.01
plot.set_xticks(xticks_new, **plotargs)
plot.set_xticklabels(xticklabels,
rotation=plotvars.xtick_label_rotation,
horizontalalignment=plotvars.xtick_label_align)
# Plot a corresponding tick on the top of the plot - cartopy feature?
proj = ccrs.PlateCarree(central_longitude=lon_mid)
if plotvars.plot_type == 1:
for xval in xticks_new:
xpt, ypt = proj.transform_point(xval, plotvars.latmax, ccrs.PlateCarree())
ypt2 = ypt + yticklen
plot.plot([xpt, xpt], [ypt, ypt2], color='k', linewidth=0.8, clip_on=False)
if yticks is not None:
plot.set_yticks(yticks, **plotargs)
plot.set_yticklabels(yticklabels,
rotation=plotvars.ytick_label_rotation,
horizontalalignment=plotvars.ytick_label_align)
# Plot a corresponding tick on the right of the plot - cartopy feature?
proj = ccrs.PlateCarree(central_longitude=lon_mid)
for ytick in yticks:
xpt, ypt = proj.transform_point(plotvars.lonmax-0.001, ytick, ccrs.PlateCarree())
xpt2 = xpt + xticklen
plot.plot([xpt, xpt2], [ypt, ypt], color='k', linewidth=0.8, clip_on=False)
# Set font size and weight
for label in plot.xaxis.get_ticklabels():
label.set_fontsize(axis_label_fontsize)
label.set_fontweight(axis_label_fontweight)
for label in plot.yaxis.get_ticklabels():
label.set_fontsize(axis_label_fontsize)
label.set_fontweight(axis_label_fontweight)
# Title
if title is not None:
plot.set_title(title, y=1.03, fontsize=title_fontsize, fontweight=title_fontweight)
def gset(xmin=None, xmax=None, ymin=None, ymax=None,
xlog=False, ylog=False, user_gset=1, twinx=None, twiny=None):
"""
| Set plot limits for all non longitude-latitide plots.
| xmin, xmax, ymin, ymax are all needed to set the plot limits.
| Set xlog/ylog to True or 1 to get a log axis.
|
| xmin=None - x minimum
| xmax=None - x maximum
| ymin=None - y minimum
| ymax=None - y maximum
| xlog=False - log x
| ylog=False - log y
| twinx=None - set to True to make a twin y axis plot
| twiny=None - set to True to make a twin x axis plot
|
| Once a user call is made to gset the plot limits are persistent.
| i.e. the next plot will use the same set of plot limits.
| Use gset() to reset to undefined plot limits i.e. the full range
| of the data.
|
| To set date axes use date strings i.e.
| cfp.gset(xmin = '1970-1-1', xmax = '1999-12-31', ymin = 285,
| ymax = 295)
|
    | Note the correct date format is 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS';
    | anything else will give unexpected results.
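    |
    | Illustrative sketch of a log pressure axis (assumes cf-plot is imported as cfp):
    | cfp.gset(xmin=-90, xmax=90, ymin=1000, ymax=10, ylog=True)
    | cfp.gset()    # reset to automatic plot limits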
:Returns:
None
|
|
|
|
"""
plotvars.user_gset = user_gset
if all(val is None for val in [xmin, xmax, ymin, ymax]):
plotvars.xmin = None
plotvars.xmax = None
plotvars.ymin = None
plotvars.ymax = None
plotvars.xlog = False
plotvars.ylog = False
plotvars.twinx = False
plotvars.twiny = False
plotvars.user_gset = 0
return
bcount = 0
for val in [xmin, xmax, ymin, ymax]:
if val is None:
bcount = bcount + 1
if bcount != 0 and bcount != 4:
errstr = 'gset error\n'
errstr += 'xmin, xmax, ymin, ymax all need to be passed to gset\n'
errstr += 'to set the plot limits\n'
raise Warning(errstr)
plotvars.xmin = xmin
plotvars.xmax = xmax
plotvars.ymin = ymin
plotvars.ymax = ymax
plotvars.xlog = xlog
plotvars.ylog = ylog
# Check if any axes are time strings
time_xstr = False
time_ystr = False
try:
float(xmin)
except Exception:
time_xstr = True
try:
float(ymin)
except Exception:
time_ystr = True
# Set plot limits
if plotvars.plot is not None and twinx is None and twiny is None:
if not time_xstr and not time_ystr:
plotvars.plot.axis(
[plotvars.xmin, plotvars.xmax, plotvars.ymin, plotvars.ymax])
if plotvars.xlog:
plotvars.plot.set_xscale('log')
if plotvars.ylog:
plotvars.plot.set_yscale('log')
# Set twinx or twiny if requested
if twinx is not None:
plotvars.twinx = twinx
if twiny is not None:
plotvars.twiny = twiny
def gopen(rows=1, columns=1, user_plot=1, file='cfplot.png',
orientation='landscape', figsize=[11.7, 8.3],
left=None, right=None, top=None, bottom=None, wspace=None,
hspace=None, dpi=None, user_position=False):
"""
| gopen is used to open a graphic file.
|
| rows=1 - number of plot rows on the page
| columns=1 - number of plot columns on the page
| user_plot=1 - internal plot variable - do not use.
| file='cfplot.png' - default file name
| orientation='landscape' - orientation - also takes 'portrait'
| figsize=[11.7, 8.3] - figure size in inches
| left=None - left margin in normalised coordinates - default=0.12
| right=None - right margin in normalised coordinates - default=0.92
    | top=None - top margin in normalised coordinates - default=0.95
| bottom=None - bottom margin in normalised coordinates - default=0.08
| wspace=None - width reserved for blank space between subplots - default=0.2
| hspace=None - height reserved for white space between subplots - default=0.2
| dpi=None - resolution in dots per inch
| user_position=False - set to True to supply plot position via gpos
| xmin, xmax, ymin, ymax values
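    |
    | Illustrative sketch of a multi-panel page (f and g stand for
    | previously read cf fields and cfp for the cf-plot import):
    | cfp.gopen(rows=2, columns=2, file='four_panel.png')
    | cfp.con(f)      # plot into panel 1
    | cfp.gpos(2)
    | cfp.con(g)      # plot into panel 2
    | cfp.gclose()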
:Returns:
None
|
|
|
|
|
"""
# Set values in globals
plotvars.rows = rows
plotvars.columns = columns
if file != 'cfplot.png':
plotvars.file = file
plotvars.orientation = orientation
plotvars.user_plot = user_plot
# Set user defined plot area to None
plotvars.plot_xmin = None
plotvars.plot_xmax = None
plotvars.plot_ymin = None
plotvars.plot_ymax = None
if left is None:
left = 0.12
if right is None:
right = 0.92
if top is None:
top = 0.95
if bottom is None:
bottom = 0.08
if rows >= 3:
bottom = 0.1
if wspace is None:
wspace = 0.2
if hspace is None:
hspace = 0.2
if rows >= 3:
hspace = 0.5
if orientation != 'landscape':
if orientation != 'portrait':
errstr = 'gopen error\n'
errstr += 'orientation incorrectly set\n'
errstr += 'input value was ' + orientation + '\n'
errstr += 'Valid options are portrait or landscape\n'
raise Warning(errstr)
# Set master plot size
if orientation == 'landscape':
plotvars.master_plot = plot.figure(figsize=(figsize[0], figsize[1]))
else:
plotvars.master_plot = plot.figure(figsize=(figsize[1], figsize[0]))
# Set margins
plotvars.master_plot.subplots_adjust(
left=left,
right=right,
top=top,
bottom=bottom,
wspace=wspace,
hspace=hspace)
# Set initial subplot
if user_position is False:
gpos(pos=1)
# Change tick length for plots > 2x2
if (columns > 2 or rows > 2):
matplotlib.rcParams['xtick.major.size'] = 2
matplotlib.rcParams['ytick.major.size'] = 2
# Set image resolution
if dpi is not None:
plotvars.dpi = dpi
def gclose(view=True):
"""
| gclose saves a graphics file. The default is to view the file as well
| - use view = False to turn this off.
| view = True - view graphics file
:Returns:
None
|
|
|
|
|
|
|
|
|
"""
# Reset the user_plot variable to off
plotvars.user_plot = 0
# Test for python or ipython
interactive = False
try:
__IPYTHON__
interactive = True
except NameError:
interactive = False
if matplotlib.is_interactive():
interactive = True
# Remove whitespace if requested
saveargs = {}
if plotvars.tight:
saveargs = {'bbox_inches': 'tight'}
file = plotvars.file
if file is not None:
# Save a file
        type = None
if file[-3:] == '.ps':
type = 1
if file[-4:] == '.eps':
type = 1
if file[-4:] == '.png':
type = 1
if file[-4:] == '.pdf':
type = 1
if type is None:
file = file + '.png'
plotvars.master_plot.savefig(
file, orientation=plotvars.orientation, dpi=plotvars.dpi, **saveargs)
plot.close()
else:
if plotvars.viewer == 'display' and interactive is False:
# Use Imagemagick display command if this exists
disp = which('display')
if disp is not None:
tfile = 'cfplot.png'
plotvars.master_plot.savefig(
tfile, orientation=plotvars.orientation, dpi=plotvars.dpi, **saveargs)
matplotlib.pyplot.ioff()
subprocess.Popen([disp, tfile])
else:
plotvars.viewer = 'matplotlib'
if plotvars.viewer == 'matplotlib' or interactive:
# Use Matplotlib viewer
matplotlib.pyplot.ion()
plot.show()
# Reset plotting
plotvars.plot = None
plotvars.twinx = None
plotvars.twiny = None
plotvars.plot_xmin = None
plotvars.plot_xmax = None
plotvars.plot_ymin = None
plotvars.plot_ymax = None
plotvars.graph_xmin = None
plotvars.graph_xmax = None
plotvars.graph_ymin = None
plotvars.graph_ymax = None
def gpos(pos=1, xmin=None, xmax=None, ymin=None, ymax=None):
"""
| Set plot position. Plots start at top left and increase by one each plot
| to the right. When the end of the row has been reached then the next
| plot will be the leftmost plot on the next row down.
| pos=pos - plot position
|
| The following four parameters are used to get full user control
| over the plot position. In addition to these cfp.gopen
| must have the user_position=True parameter set.
| xmin=None xmin in normalised coordinates
| xmax=None xmax in normalised coordinates
| ymin=None ymin in normalised coordinates
| ymax=None ymax in normalised coordinates
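    |
    | Illustrative sketch of a user-positioned plot (assumes cf-plot is imported as cfp):
    | cfp.gopen(user_position=True)
    | cfp.gpos(xmin=0.1, xmax=0.6, ymin=0.55, ymax=0.95)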
|
|
:Returns:
None
|
|
|
|
|
|
|
|
"""
# Check inputs are okay
if pos < 1 or pos > plotvars.rows * plotvars.columns:
errstr = 'pos error - pos out of range:\n range = 1 - '
errstr = errstr + str(plotvars.rows * plotvars.columns)
errstr = errstr + '\n input pos was ' + str(pos)
errstr = errstr + '\n'
raise Warning(errstr)
user_pos = False
if all(val is not None for val in [xmin, xmax, ymin, ymax]):
user_pos = True
plotvars.plot_xmin = xmin
plotvars.plot_xmax = xmax
plotvars.plot_ymin = ymin
plotvars.plot_ymax = ymax
    # Reset any accumulated multiple graph limits
plotvars.graph_xmin = None
plotvars.graph_xmax = None
plotvars.graph_ymin = None
plotvars.graph_ymax = None
if user_pos is False:
plotvars.plot = plotvars.master_plot.add_subplot(
plotvars.rows, plotvars.columns, pos)
else:
delta_x = plotvars.plot_xmax - plotvars.plot_xmin
delta_y = plotvars.plot_ymax - plotvars.plot_ymin
plotvars.plot = plotvars.master_plot.add_axes([plotvars.plot_xmin,
plotvars.plot_ymin,
delta_x, delta_y])
plotvars.plot.tick_params(which='both', direction='out', right=True, top=True)
# Set position in global variables
plotvars.pos = pos
# Reset contour levels if they are not defined by the user
if plotvars.user_levs == 0:
if plotvars.levels_step is None:
levs()
else:
levs(step=plotvars.levels_step)
def pcon(mb=None, km=None, h=7.0, p0=1000):
"""
| pcon is a function for converting pressure to height in kilometers and
| vice-versa. This function uses the equation P=P0exp(-z/H) to translate
| between pressure and height. In pcon the surface pressure P0 is set to
| 1000.0mb and the scale height H is set to 7.0. The value of H can vary
| from 6.0 in the polar regions to 8.5 in the tropics as well as
| seasonally. The value of P0 could also be said to be 1013.25mb rather
| than 1000.0mb.
| As this relationship is approximate:
| (i) Only use this for making the axis labels on y axis pressure plots
| (ii) Put the converted axis on the right hand side to indicate that
| this isn't the primary unit of measure
    | print(cfp.pcon(mb=[1000, 300, 100, 30, 10, 3, 1, 0.3]))
    | [0. 8.42780963 16.11809565 24.54590528 32.2361913
    | 40.66400093 48.35428695 56.78209658]
| mb=None - input pressure
| km=None - input height
| h=7.0 - default value for h
| p0=1000 - default value for p0
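    |
    | Illustrative worked example (approximate, with the defaults h=7.0, p0=1000):
    | cfp.pcon(mb=100) is about 16.1 km and cfp.pcon(km=16) is about 101.7 mb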
:Returns:
| pressure(mb) if height(km) input,
| height(km) if pressure(mb) input
"""
    if all(val is None for val in [mb, km]):
errstr = 'pcon error - pcon must have mb or km input\n'
raise Warning(errstr)
if mb is not None:
return h * (np.log(p0) - np.log(mb))
if km is not None:
return np.exp(-1.0 * (np.array(km) / h - np.log(p0)))
def supscr(text=None):
"""
| supscr - add superscript text formatting for ** and ^
| This is an internal routine used in titles and colour bars
| and not used by the user.
| text=None - input text
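    |
    | Illustrative examples: supscr('m s-1') returns 'm s$^{-1}$' and
    | supscr('kg m**-2') returns 'kg m$^{-2}$'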
:Returns:
Formatted text
|
|
|
|
|
|
|
"""
if text is None:
errstr = '\n supscr error - supscr must have text input\n'
raise Warning(errstr)
tform = ''
sup = 0
for i in text:
if (i == '^'):
sup = 2
if (i == '*'):
sup = sup + 1
if (sup == 0):
tform = tform + i
if (sup == 1):
if (i not in '*'):
tform = tform + '*' + i
sup = 0
if (sup == 3):
if i in '-0123456789':
tform = tform + i
else:
tform = tform + '}$' + i
sup = 0
if (sup == 2):
tform = tform + '$^{'
sup = 3
if (sup == 3):
tform = tform + '}$'
tform = tform.replace('m2', 'm$^{2}$')
tform = tform.replace('m3', 'm$^{3}$')
tform = tform.replace('m-2', 'm$^{-2}$')
tform = tform.replace('m-3', 'm$^{-3}$')
tform = tform.replace('s-1', 's$^{-1}$')
tform = tform.replace('s-2', 's$^{-2}$')
return tform
def gvals(dmin=None, dmax=None, mystep=None, mod=True):
"""
| gvals - work out a sensible set of values between two limits
| This is an internal routine used for contour levels and axis
| labelling and is not generally used by the user.
| dmin = None - minimum
| dmax = None - maximum
| mystep = None - use this step
| mod = True - modify data to make use of a multiplier
|
|
|
|
|
|
"""
# Copies of inputs as these might be changed
dmin1 = deepcopy(dmin)
dmax1 = deepcopy(dmax)
# Swap values if dmin1 > dmax1 as this returns no values
if dmax1 < dmin1:
dmin1, dmax1 = dmax1, dmin1
# Data range
data_range = dmax1 - dmin1
# field multiplier
mult = 0
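# mult records the power of ten the data have been scaled by so that
# labels can later be written as value x 10**mult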
# Return some values if dmin1 = dmax1
if dmin1 == dmax1:
vals = np.array([dmin1 - 1, dmin1, dmin1 + 1])
mult = 0
return vals, mult
# Modify if requested or if out of range 0.001 to 2000000
if data_range < 0.001:
while dmax1 <= 3:
dmin1 = dmin1 * 10.0
dmax1 = dmax1 * 10.0
data_range = dmax1 - dmin1
mult = mult - 1
if data_range > 2000000:
while dmax1 > 10:
dmin1 = dmin1 / 10.0
dmax1 = dmax1 / 10.0
data_range = dmax1 - dmin1
mult = mult + 1
if data_range >= 0.001 and data_range <= 2000000:
# Calculate an appropriate step
step = None
test_steps = [0.0001, 0.0002, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1,
0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000,
20000, 50000, 100000]
if mystep is not None:
step = mystep
else:
for val in test_steps:
nvals = data_range / val
if val < 1:
if nvals > 8:
step = val
else:
if nvals > 11:
step = val
# Return an error if no step found
if step is None:
errstr = '\n\n cfp.gvals - no valid step values found \n\n'
errstr += 'cfp.gvals(' + str(dmin1) + ',' + str(dmax1) + ')\n\n'
raise Warning(errstr)
# values < 0.0
vals = None
vals1 = None
if dmin1 < 0.0:
vals1 = (np.arange(-dmin1 / step) * -step)[::-1] - step
# values >= 0.0
vals2 = None
if dmax1 >= 0.0:
vals2 = np.arange(dmax1 / step + 1) * step
if vals1 is not None and vals2 is None:
vals = vals1
if vals2 is not None and vals1 is None:
vals = vals2
if vals1 is not None and vals2 is not None:
vals = np.concatenate((vals1, vals2))
# Round off decimal numbers so that
# (np.arange(4) * -0.1)[3] = -0.30000000000000004 gives -0.3 as expected
if step < 1:
vals = vals.round(6)
# Change values to integers for values >= 1
if step >= 1:
vals = vals.astype(int)
pts = np.where(np.logical_and(vals >= dmin1, vals <= dmax1))
if np.min(pts) > -1:
vals = vals[pts]
if mod is False:
vals = vals * 10**mult
mult = 0
return(vals, mult)
def cf_data_assign(f=None, colorbar_title=None, verbose=None, rotated_vect=False):
"""
| Check cf input data is okay and return data for contour plot.
| This is an internal routine not used by the user.
| f=None - input cf field
| colorbar_title=None - input colour bar title
| rotated_vect=False - return 1D x and y for rotated plot vectors
| verbose=None - set to 1 to get a verbose idea of what the
| cf_data_assign is doing
:Returns:
| f - data for contouring
| x - x coordinates of data (optional)
| y - y coordinates of data (optional)
| ptype - plot type
| colorbar_title - colour bar title
| xlabel - x label for plot
| ylabel - y label for plot
|
|
|
|
|
"""
# Check input data has the correct number of dimensions
# Take into account rotated pole fields having extra dimensions
ndim = len(f.domain_axes().filter_by_size(cf.gt(1)))
if f.ref('rotated_latitude_longitude', default=False) is False:
if (ndim > 2 or ndim < 1):
print('')
if (ndim > 2):
errstr = 'cf_data_assign error - data has too many dimensions'
if (ndim < 1):
errstr = 'cf_data_assign error - data has too few dimensions'
errstr += '\n cf-plot requires one or two dimensional data\n'
for mydim in list(f.dimension_coordinates()):
sn = getattr(f.construct(mydim), 'standard_name', False)
ln = getattr(f.construct(mydim), 'long_name', False)
if sn:
errstr = errstr + \
str(mydim) + ',' + str(sn) + ',' + \
str(f.construct(mydim).size) + '\n'
else:
if ln:
errstr = errstr + \
str(mydim) + ',' + str(ln) + ',' + \
str(f.construct(mydim).size) + '\n'
raise Warning(errstr)
# Set up data arrays and variables
lons = None
lats = None
height = None
time = None
xlabel = ''
ylabel = ''
has_lons = None
has_lats = None
has_height = None
has_time = None
xpole = None
ypole = None
ptype = None
field = None
x = None
y = None
# Extract coordinate data if a matching CF standard_name or axis is found
for mydim in list(f.dimension_coordinates()):
sn = getattr(f.construct(mydim), 'standard_name', 'NoName')
an = f.construct(mydim).get_property('axis', 'NoName')
standard_name_x = ['longitude']
vs = 'cf_data_assign standard_name, axis - assigned '
if (sn in standard_name_x or an == 'X'):
if verbose:
print(vs + 'lons -', sn, an)
lons = np.squeeze(f.construct(mydim).array)
standard_name_y = ['latitude']
if (sn in standard_name_y or an == 'Y'):
if verbose:
print(vs + 'lats -', sn, an)
lats = np.squeeze(f.construct(mydim).array)
standard_name_z = ['pressure', 'air_pressure', 'height', 'depth']
if (sn in standard_name_z or an == 'Z'):
if verbose:
print(vs + 'height -', sn, an)
height = np.squeeze(f.construct(mydim).array)
standard_name_t = ['time']
if (sn in standard_name_t or an == 'T'):
if verbose:
print(vs + 'time -', sn, an)
time = np.squeeze(f.construct(mydim).array)
# CF defined units
lon_units = ['degrees_east', 'degree_east', 'degree_E',
'degrees_E', 'degreeE', 'degreesE']
lat_units = ['degrees_north', 'degree_north', 'degree_N',
'degrees_N', 'degreeN', 'degreesN']
height_units = ['mb', 'mbar', 'millibar', 'decibar', 'atmosphere',
'atm', 'pascal', 'Pa', 'hPa']
time_units = ['day', 'days', 'd', 'hour', 'hours', 'hr', 'h', 'minute',
'minutes', 'min', 'mins', 'second', 'seconds', 'sec',
'secs', 's']
# Extract coordinate data if a matching CF set of units is found
for mydim in list(f.dimension_coordinates()):
units = getattr(f.construct(mydim), 'units', False)
if units in lon_units:
if lons is None:
if verbose:
print('cf_data_assign units - assigned lons -', units)
lons = np.squeeze(f.construct(mydim).array)
if units in lat_units:
if lats is None:
if verbose:
print('cf_data_assign units - assigned lats -', units)
lats = np.squeeze(f.construct(mydim).array)
if units in height_units:
if height is None:
if verbose:
print('cf_data_assign units - assigned height -', units)
height = np.squeeze(f.construct(mydim).array)
if units in time_units:
if time is None:
if verbose:
print('cf_data_assign units - assigned time -', units)
time = np.squeeze(f.construct(mydim).array)
# Extract coordinate data from variable name if not already assigned
for mydim in list(f.dimension_coordinates()):
name = cf_var_name(field=f, dim=mydim)
vs = 'cf_data_assign dimension name - assigned '
if name[0:3] == 'lon':
if lons is None:
if verbose:
print(vs + 'lons' + '-', name)
lons = np.squeeze(f.construct(mydim).array)
if name[0:3] == 'lat':
if lats is None:
if verbose:
print(vs+'lats' + '-', name)
lats = np.squeeze(f.construct(mydim).array)
if (name[0:5] == 'theta' or name == 'p' or name == 'air_pressure'):
if height is None:
if verbose:
print(vs+'height' + '-', name)
height = np.squeeze(f.construct(mydim).array)
if name[0:1] == 't':
if time is None:
if verbose:
print(vs+'time ' + '-', name)
time = np.squeeze(f.construct(mydim).array)
if np.size(lons) > 1:
has_lons = 1
if np.size(lats) > 1:
has_lats = 1
if np.size(height) > 1:
has_height = 1
if np.size(time) > 1:
has_time = 1
# assign field data
field = np.squeeze(f.array)
# Change Boolean data to integer
if str(f.dtype) == 'bool':
warnstr = '\n\n\n Warning - boolean data found - converting to integers\n\n\n'
print(warnstr)
g = deepcopy(f)
g.dtype = int
field = np.squeeze(g.array)
# Check what plot type is required.
# 0=simple contour plot, 1=map plot, 2=latitude-height plot,
# 3=longitude-height plot, 4=longitude-time plot, 5=latitude-time plot,
# 6=rotated pole plot, 7=time-height plot.
if (np.size(lons) > 1 and np.size(lats) > 1):
ptype = 1
x = lons
y = lats
if (np.size(lats) > 1 and np.size(height) > 1):
ptype = 2
x = lats
y = height
for mydim in list(f.dimension_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name[0:3] == 'lat':
xunits = str(getattr(f.construct(mydim), 'Units', ''))
if (xunits in lat_units):
xunits = 'degrees'
xlabel = name + ' (' + xunits + ')'
if (name[0:1] == 'p' or name == 'air_pressure' or
name[0:5] == 'theta' or name[0:6] == 'height' or
name[0:6] == 'hybrid' or name[0:5] == 'level' or
name[0:5] == 'model'):
yunits = str(getattr(f.construct(mydim), 'Units', ''))
ylabel = name + ' (' + yunits + ')'
if (np.size(lons) > 1 and np.size(height) > 1):
ptype = 3
x = lons
y = height
for mydim in list(f.dimension_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name[0:3] == 'lon':
xunits = str(getattr(f.construct(mydim), 'Units', ''))
if (xunits in lon_units):
xunits = 'degrees'
xlabel = name + ' (' + xunits + ')'
if (name[0:1] == 'p' or name[0:5] == 'theta' or
name[0:6] == 'height' or name[0:6] == 'hybrid' or
name[0:5] == 'level' or name[0:5] == 'model'):
yunits = str(getattr(f.construct(mydim), 'Units', ''))
ylabel = name + ' (' + yunits + ')'
if (np.size(lons) > 1 and np.size(time) > 1):
ptype = 4
x = lons
y = time
if np.size(lats) > 1 and np.size(time) > 1:
ptype = 5
x = lats
y = time
# Rotated pole
if f.ref('rotated_latitude_longitude', default=False):
ptype = 6
rotated_pole = f.ref('rotated_latitude_longitude')
xpole = rotated_pole['grid_north_pole_longitude']
ypole = rotated_pole['grid_north_pole_latitude']
# Extract grid x and y coordinates
for mydim in list(f.dimension_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name in ['grid_longitude', 'longitude', 'x']:
x = np.squeeze(f.construct(mydim).array)
xunits = str(getattr(f.construct(mydim), 'units', ''))
xlabel = cf_var_name(field=f, dim=mydim)
if name in ['grid_latitude', 'latitude', 'y']:
y = np.squeeze(f.construct(mydim).array)
# Flip y and data if reversed
if y[0] > y[-1]:
y = y[::-1]
field = np.flipud(field)
yunits = str(getattr(f.construct(mydim), 'Units', ''))
ylabel = cf_var_name(field=f, dim=mydim) + yunits
# Extract auxiliary lons and lats if they exist
if ptype == 1 or ptype is None:
if plotvars.proj != 'rotated' and not rotated_vect:
aux_lons = False
aux_lats = False
for mydim in list(f.auxiliary_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name in ['longitude']:
xpts = np.squeeze(f.construct(mydim).array)
aux_lons = True
if name in ['latitude']:
ypts = np.squeeze(f.construct(mydim).array)
aux_lats = True
if aux_lons and aux_lats:
x = xpts
y = ypts
ptype = 1
# time height plot
if has_height == 1 and has_time == 1:
ptype = 7
for mydim in list(f.dimension_coordinates()):
if np.size(np.squeeze(f.construct(mydim).array)
) == np.shape(np.squeeze(f.array))[0]:
x = np.squeeze(f.construct(mydim).array)
xunits = str(getattr(f.construct(mydim), 'units', ''))
xlabel = cf_var_name(field=f, dim=mydim) + xunits
if np.size(np.squeeze(f.construct(mydim).array)
) == np.shape(np.squeeze(f.array))[1]:
y = np.squeeze(f.construct(mydim).array)
yunits = '(' + str(getattr(f.construct(mydim), 'Units', ''))
yunits += ')'
ylabel = cf_var_name(field=f, dim=mydim) + yunits
# Rotate array to get it as time vs height
field = np.rot90(field)
field = np.flipud(field)
# UKCP grid
if f.ref('transverse_mercator', default=False):
ptype = 1
field = np.squeeze(f.array)
# Find the auxiliary lons and lats if provided
has_lons = False
has_lats = False
for mydim in list(f.auxiliary_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name in ['longitude']:
x = np.squeeze(f.construct(mydim).array)
has_lons = True
if name in ['latitude']:
y = np.squeeze(f.construct(mydim).array)
has_lats = True
# Calculate lons and lats if no auxiliary data for these
if not has_lons or not has_lats:
xpts = f.construct('X').array
ypts = f.construct('Y').array
field = np.squeeze(f.array)
ref = f.ref('transverse_mercator')
false_easting = ref['false_easting']
false_northing = ref['false_northing']
central_longitude = ref['longitude_of_central_meridian']
central_latitude = ref['latitude_of_projection_origin']
scale_factor = ref['scale_factor_at_central_meridian']
# Set the transform
transform = ccrs.TransverseMercator(false_easting=false_easting,
false_northing=false_northing,
central_longitude=central_longitude,
central_latitude=central_latitude,
scale_factor=scale_factor)
# Calculate the longitude and latitude points
xvals, yvals = np.meshgrid(xpts, ypts)
points = ccrs.PlateCarree().transform_points(transform, xvals, yvals)
x = np.array(points)[:, :, 0]
y = np.array(points)[:, :, 1]
# None of the above
if ptype is None:
ptype = 0
data_axes = f.get_data_axes()
count = 1
for d in data_axes:
i = f.constructs.domain_axis_identity(d)
try:
c = f.coordinate(i)
if np.size(c.array) > 1:
if count == 1:
y = c
elif count == 2:
x = c
count += 1
except ValueError:
errstr = "\n\ncf_data_assign - cannot find data to return\n\n"
errstr += str(c) + "\n\n"
raise Warning(errstr)
xunits = str(getattr(f.construct(mydim), 'units', ''))
xlabel = cf_var_name(field=f, dim=mydim) + xunits
yunits = str(getattr(f.construct(mydim), 'Units', ''))
ylabel = cf_var_name(field=f, dim=mydim) + yunits
# Assign colorbar_title
if (colorbar_title is None):
colorbar_title = 'No Name'
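# Attributes checked later take precedence so standard_name is preferred,
# followed by long_name, short_name, the netCDF variable name and id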
if hasattr(f, 'id'):
colorbar_title = f.id
nc = f.nc_get_variable(None)
if nc:
colorbar_title = f.nc_get_variable()
if hasattr(f, 'short_name'):
colorbar_title = f.short_name
if hasattr(f, 'long_name'):
colorbar_title = f.long_name
if hasattr(f, 'standard_name'):
colorbar_title = f.standard_name
if hasattr(f, 'Units'):
if str(f.Units) == '':
colorbar_title = colorbar_title + ''
else:
colorbar_title = colorbar_title + \
'(' + supscr(str(f.Units)) + ')'
# Return data
return(field, x, y, ptype, colorbar_title, xlabel, ylabel, xpole, ypole)
def check_data(field=None, x=None, y=None):
"""
| check_data - check user input contour data is correct.
| This is an internal routine and is not used by the user.
|
| field=None - field
| x=None - x points for field
| y=None - y points for field
|
|
|
|
|
|
"""
# Input error trapping
args = True
errstr = '\n'
if np.size(field) == 1:
if field is None:
errstr = errstr + 'con error - a field for contouring must be '
errstr += 'passed with the f= flag\n'
args = False
if np.size(x) == 1:
if x is None:
x = np.arange(np.shape(field)[1])
if np.size(y) == 1:
if y is None:
y = np.arange(np.shape(field)[0])
if not args:
raise Warning(errstr)
# Check input dimensions look okay.
# All inputs 2D
if np.ndim(field) == 2 and np.ndim(x) == 2 and np.ndim(y) == 2:
xpts = np.shape(field)[1]
ypts = np.shape(field)[0]
if xpts != np.shape(x)[1] or xpts != np.shape(y)[1]:
args = False
if ypts != np.shape(x)[0] or ypts != np.shape(y)[0]:
args = False
if args:
return
# Field x and y all 1D
if np.ndim(field) == 1 and np.ndim(x) == 1 and np.ndim(y) == 1:
if np.size(x) != np.size(field):
args = False
if np.size(y) != np.size(field):
args = False
if args:
return
# Field 2D, x and y 1D
if np.ndim(field) != 2:
args = False
if np.ndim(x) != 1:
args = False
if np.ndim(y) != 1:
args = False
if np.ndim(field) == 2:
if np.size(x) != np.shape(field)[1]:
args = False
if np.size(y) != np.shape(field)[0]:
args = False
if args is False:
errstr = errstr + 'Input arguments incorrectly shaped:\n'
errstr = errstr + 'x has shape:' + str(np.shape(x)) + '\n'
errstr = errstr + 'y has shape:' + str(np.shape(y)) + '\n'
errstr = errstr + 'field has shape' + str(np.shape(field)) + '\n\n'
errstr = errstr + 'Expected x=xpts, y=ypts, field=(ypts,xpts)\n'
errstr = errstr + 'x=npts, y=npts, field=npts\n'
errstr = errstr + \
'or x=[ypts, xpts], y=[ypts, xpts], field=[ypts, xpts]\n'
raise Warning(errstr)
def cscale(scale=None, ncols=None, white=None, below=None,
above=None, reverse=False, uniform=False):
"""
| cscale - choose and manipulate colour maps. Around 200 colour scales are
| available - see the gallery section for more details.
|
| scale=None - name of colour map
| ncols=None - number of colours for colour map
| white=None - change these colours to be white
| below=None - change the number of colours below the mid point of
| the colour scale to be this
| above=None - change the number of colours above the mid point of
| the colour scale to be this
| reverse=False - reverse the colour scale
| uniform=False - produce a uniform colour scale.
| For example: if below=3 and above=10 are specified
| then initially below=10 and above=10 are used. The
| colour scale is then cropped to use scale colours
| 6 to 19. This produces a more uniform intensity colour
| scale than one where all the blues are compressed into
| 3 colours.
|
|
| Personal colour maps can be used by saving the map as a file of
| red green blue values, with one set of values per line.
|
|
| Use cscale() to reset to the default settings.
|
:Returns:
None
|
|
|
|
"""
# If no map requested reset to default
if scale is None:
scale = 'scale1'
plotvars.cscale_flag = 0
return
else:
plotvars.cs_user = scale
plotvars.cscale_flag = 1
vals = [ncols, white, below, above]
if any(val is not None for val in vals):
plotvars.cscale_flag = 2
if reverse is not False or uniform is not False:
plotvars.cscale_flag = 2
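# cscale_flag: 0 = default colour scale, 1 = user selected colour scale,
# 2 = user selected colour scale that has been modified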
if scale == 'scale1' or scale == '':
if scale == 'scale1':
myscale = cscale1
if scale == 'viridis':
myscale = viridis
# convert cscale1 or viridis from hex to rgb
r = []
g = []
b = []
for myhex in myscale:
myhex = myhex.lstrip('#')
mylen = len(myhex)
rgb = tuple(int(myhex[i:i + mylen // 3], 16)
for i in range(0, mylen, mylen // 3))
r.append(rgb[0])
g.append(rgb[1])
b.append(rgb[2])
else:
package_path = os.path.dirname(__file__)
file = os.path.join(package_path, 'colourmaps/' + scale + '.rgb')
if os.path.isfile(file) is False:
if os.path.isfile(scale) is False:
errstr = '\ncscale error - colour scale not found:\n'
errstr = errstr + 'File ' + file + ' not found\n'
errstr = errstr + 'File ' + scale + ' not found\n'
raise Warning(errstr)
else:
file = scale
# Read in rgb values and convert to hex
f = open(file, 'r')
lines = f.read()
lines = lines.splitlines()
r = []
g = []
b = []
for line in lines:
vals = line.split()
r.append(int(vals[0]))
g.append(int(vals[1]))
b.append(int(vals[2]))
# Reverse the colour scale if requested
if reverse:
r = r[::-1]
g = g[::-1]
b = b[::-1]
# Interpolate to a new number of colours if requested
if ncols is not None:
x = np.arange(np.size(r))
xnew = np.linspace(0, np.size(r) - 1, num=ncols, endpoint=True)
f_red = interpolate.interp1d(x, r)
f_green = interpolate.interp1d(x, g)
f_blue = interpolate.interp1d(x, b)
r = f_red(xnew)
g = f_green(xnew)
b = f_blue(xnew)
# Change the number of colours below and above the mid-point if requested
if below is not None or above is not None:
# Mid-point of colour scale
npoints = np.size(r) // 2
# Below mid point x locations
x_below = []
lower = 0
if below == 1:
x_below = 0
if below is not None:
lower = below
if below is None:
lower = npoints
if below is not None and uniform:
lower = max(above, below)
if (lower > 1):
x_below = ((npoints - 1) / float(lower - 1)) * np.arange(lower)
# Above mid point x locations
x_above = []
upper = 0
if above == 1:
x_above = npoints * 2 - 1
if above is not None:
upper = above
if above is None:
upper = npoints
if above is not None and uniform:
upper = max(above, below)
if (upper > 1):
x_above = ((npoints - 1) / float(upper - 1)) * \
np.arange(upper) + npoints
# Append new colour positions
xnew = np.append(x_below, x_above)
# Interpolate to new colour scale
xpts = np.arange(np.size(r))
f_red = interpolate.interp1d(xpts, r)
f_green = interpolate.interp1d(xpts, g)
f_blue = interpolate.interp1d(xpts, b)
r = f_red(xnew)
g = f_green(xnew)
b = f_blue(xnew)
# Reset colours if uniform is set
if uniform:
mid_pt = max(below, above)
r = r[mid_pt - below:mid_pt + above]
g = g[mid_pt - below:mid_pt + above]
b = b[mid_pt - below:mid_pt + above]
# Convert to hex
hexarr = []
for col in np.arange(np.size(r)):
hexarr.append('#%02x%02x%02x' % (int(r[col]), int(g[col]), int(b[col])))
# White requested colour positions
if white is not None:
if np.size(white) == 1:
hexarr[white] = '#ffffff'
else:
for col in white:
hexarr[col] = '#ffffff'
# Set colour scale
plotvars.cs = hexarr
def cscale_get_map():
"""
| cscale_get_map - return colour map for use in contour plots.
| This depends on the colour bar extensions
| This is an internal routine and is not used by the user.
|
|
:Returns:
colour map
|
|
|
|
|
"""
cscale_ncols = np.size(plotvars.cs)
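# The first and last colours of the scale are reserved for the colour bar
# extension triangles so drop them according to the extension setting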
if (plotvars.levels_extend == 'both'):
colmap = plotvars.cs[1:cscale_ncols - 1]
if (plotvars.levels_extend == 'min'):
colmap = plotvars.cs[1:]
if (plotvars.levels_extend == 'max'):
colmap = plotvars.cs[:cscale_ncols - 1]
if (plotvars.levels_extend == 'neither'):
colmap = plotvars.cs
return (colmap)
def bfill(f=None, x=None, y=None, clevs=False, lonlat=False, bound=False,
alpha=1.0, single_fill_color=None, white=True, zorder=None):
"""
| bfill - block fill a field with colour rectangles
| This is an internal routine and is not generally used by the user.
|
| f=None - field
| x=None - x points for field
| y=None - y points for field
| clevs=False - levels for filling
| lonlat=False - lonlat data
| bound=False - x and y are cf data boundaries
| alpha=1.0 - transparency setting 0 to 1
| white=True - colour unplotted areas white
| single_fill_color=None - colour for a blockfill between two levels
| - takes matplotlib named colours or
| - hexadecimal notation - '#d3d3d3' for grey
| zorder=None - plotting order
|
:Returns:
None
|
|
|
|
"""
# If single_fill_color is defined then turn off whiting out the background.
if single_fill_color is not None:
white = False
# Set the default map coordinates for the data to be PlateCarree
plotargs = {}
if lonlat:
plotargs = {'transform': ccrs.PlateCarree()}
if isinstance(f, cf.Field):
if f.ref('transverse_mercator', default=False):
lonlat = True
# Case of transverse mercator of which UKCP is an example
ref = f.ref('transverse_mercator')
false_easting = ref['false_easting']
false_northing = ref['false_northing']
central_longitude = ref['longitude_of_central_meridian']
central_latitude = ref['latitude_of_projection_origin']
scale_factor = ref['scale_factor_at_central_meridian']
transform = ccrs.TransverseMercator(false_easting=false_easting,
false_northing=false_northing,
central_longitude=central_longitude,
central_latitude=central_latitude,
scale_factor=scale_factor)
# Extract the axes and data
xpts = np.append(f.dim('X').bounds.array[:, 0], f.dim('X').bounds.array[-1, 1])
ypts = np.append(f.dim('Y').bounds.array[:, 0], f.dim('Y').bounds.array[-1, 1])
field = np.squeeze(f.array)
plotargs = {'transform': transform}
else:
# Assign f to field as this may be modified in lat-lon plots
field = f
if bound:
xpts = x
ypts = y
else:
# Find x box boundaries
xpts = x[0] - (x[1] - x[0]) / 2.0
for ix in np.arange(np.size(x) - 1):
xpts = np.append(xpts, x[ix] + (x[ix + 1] - x[ix]) / 2.0)
xpts = np.append(xpts, x[ix + 1] + (x[ix + 1] - x[ix]) / 2.0)
# Find y box boundaries
ypts = y[0] - (y[1] - y[0]) / 2.0
for iy in np.arange(np.size(y) - 1):
ypts = np.append(ypts, y[iy] + (y[iy + 1] - y[iy]) / 2.0)
ypts = np.append(ypts, y[iy + 1] + (y[iy + 1] - y[iy]) / 2.0)
# Shift lon grid if needed
if lonlat:
# Extract upper bound and original rhs of box longitude bounding points
upper_bound = ypts[-1]
# Drop the last point from xpts and ypts or shifting of the grid fails
# The last points are the right / upper bounds for the last data box
xpts = xpts[0:-1]
ypts = ypts[0:-1]
if plotvars.lonmin < np.nanmin(xpts):
xpts = xpts - 360
if plotvars.lonmin > np.nanmax(xpts):
xpts = xpts + 360
# Add cyclic information if missing.
lonrange = np.nanmax(xpts) - np.nanmin(xpts)
if lonrange < 360:
# field, xpts = cartopy_util.add_cyclic_point(field, xpts)
field, xpts = add_cyclic(field, xpts)
right_bound = xpts[-1] + (xpts[-1] - xpts[-2])
# Add end x and y end points
xpts = np.append(xpts, right_bound)
ypts = np.append(ypts, upper_bound)
levels = np.array(deepcopy(clevs)).astype('float')
# Polar stereographic
# Set points past plotting limb to be plotvars.boundinglat
# Also set any lats past the pole to be the pole
if plotvars.proj == 'npstere':
pts = np.where(ypts < plotvars.boundinglat)
if np.size(pts) > 0:
ypts[pts] = plotvars.boundinglat
pts = np.where(ypts > 90.0)
if np.size(pts) > 0:
ypts[pts] = 90.0
if plotvars.proj == 'spstere':
pts = np.where(ypts > plotvars.boundinglat)
if np.size(pts) > 0:
ypts[pts] = plotvars.boundinglat
pts = np.where(ypts < -90.0)
if np.size(pts) > 0:
ypts[pts] = -90.0
# Generate a Matplotlib colour map
if single_fill_color is None:
cols = plotvars.cs
else:
cols = single_fill_color
cmap = matplotlib.colors.ListedColormap(cols)
if single_fill_color is None:
if plotvars.levels_extend == 'both' or plotvars.levels_extend == 'min':
levels = np.insert(levels, 0, -1e30)
if plotvars.levels_extend == 'both' or plotvars.levels_extend == 'max':
levels = np.append(levels, 1e30)
if plotvars.levels_extend == 'both' or plotvars.levels_extend == 'min':
cmap.set_under(plotvars.cs[0])
cols = cols[1:]
if plotvars.levels_extend == 'both' or plotvars.levels_extend == 'max':
cmap.set_over(plotvars.cs[-1])
cols = cols[:-1]
# Colour array for storing the cell colour. Start with -1 as the default
# as the colours run from 0 to np.size(levels)-1
colarr = np.zeros([np.shape(field)[0], np.shape(field)[1]])-1
for i in np.arange(np.size(levels)-1):
lev = levels[i]
pts = np.where(np.logical_and(field > lev, field <= levels[i+1]))
colarr[pts] = int(i)
# Change points that are masked back to -1
if isinstance(field, np.ma.MaskedArray):
pts = np.ma.where(field.mask)
if np.size(pts) > 0:
colarr[pts] = -1
if plotvars.plot_type == 1 and plotvars.proj != 'cyl':
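# Non-cylindrical map projection - build one PolyCollection of lon-lat cell
# rectangles per contour level and let the map transform reproject them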
for i in np.arange(np.size(levels)-1):
allverts = []
xy_stack = np.column_stack(np.where(colarr == i))
for pt in np.arange(np.shape(xy_stack)[0]):
ix = xy_stack[pt][1]
iy = xy_stack[pt][0]
lons = [xpts[ix], xpts[ix+1], xpts[ix+1], xpts[ix], xpts[ix]]
lats = [ypts[iy], ypts[iy], ypts[iy+1], ypts[iy+1], ypts[iy]]
txpts, typts = lons, lats
verts = [
(txpts[0], typts[0]),
(txpts[1], typts[1]),
(txpts[2], typts[2]),
(txpts[3], typts[3]),
(txpts[4], typts[4]),
]
allverts.append(verts)
# Make the collection and add it to the plot.
if single_fill_color is None:
color = plotvars.cs[i]
else:
color = single_fill_color
coll = PolyCollection(allverts, facecolor=color, edgecolors=color, alpha=alpha,
zorder=zorder, **plotargs)
if lonlat:
plotvars.mymap.add_collection(coll)
else:
plotvars.plot.add_collection(coll)
else:
for i in np.arange(np.size(levels)-1):
allverts = []
xy_stack = np.column_stack(np.where(colarr == i))
for pt in np.arange(np.shape(xy_stack)[0]):
ix = xy_stack[pt][1]
iy = xy_stack[pt][0]
verts = [
(xpts[ix], ypts[iy]),
(xpts[ix+1], ypts[iy]),
(xpts[ix+1], ypts[iy+1]),
(xpts[ix], ypts[iy+1]),
(xpts[ix], ypts[iy]),
]
allverts.append(verts)
# Make the collection and add it to the plot.
if single_fill_color is None:
color = plotvars.cs[i]
else:
color = single_fill_color
coll = PolyCollection(allverts, facecolor=color, edgecolors=color,
alpha=alpha, zorder=zorder, **plotargs)
if lonlat:
plotvars.mymap.add_collection(coll)
else:
plotvars.plot.add_collection(coll)
# Add white for undefined areas
if white:
allverts = []
xy_stack = np.column_stack(np.where(colarr == -1))
for pt in np.arange(np.shape(xy_stack)[0]):
ix = xy_stack[pt][1]
iy = xy_stack[pt][0]
verts = [
(xpts[ix], ypts[iy]),
(xpts[ix+1], ypts[iy]),
(xpts[ix+1], ypts[iy+1]),
(xpts[ix], ypts[iy+1]),
(xpts[ix], ypts[iy]),
]
allverts.append(verts)
# Make the collection and add it to the plot - white fill for undefined areas
coll = PolyCollection(allverts, facecolor='#ffffff', edgecolors='#ffffff',
alpha=alpha, zorder=zorder, **plotargs)
if lonlat:
plotvars.mymap.add_collection(coll)
else:
plotvars.plot.add_collection(coll)
def regrid(f=None, x=None, y=None, xnew=None, ynew=None):
"""
| regrid - bilinear interpolation of a grid to new grid locations
|
|
| f=None - original field
| x=None - original field x values
| y=None - original field y values
| xnew=None - new x points
| ynew=None - new y points
|
:Returns:
field values at requested locations
|
|
"""
# Copy input arrays
regrid_f = deepcopy(f)
regrid_x = deepcopy(x)
regrid_y = deepcopy(y)
fieldout = []
# Reverse xpts and field if necessary
if regrid_x[0] > regrid_x[-1]:
regrid_x = regrid_x[::-1]
regrid_f = np.fliplr(regrid_f)
# Reverse ypts and field if necessary
if regrid_y[0] > regrid_y[-1]:
regrid_y = regrid_y[::-1]
regrid_f = np.flipud(regrid_f)
# Iterate over the new grid to get the new grid values.
for i in np.arange(np.size(xnew)):
xval = xnew[i]
yval = ynew[i]
# Find position of new grid point in the x and y arrays
myxpos = find_pos_in_array(vals=regrid_x, val=xval)
myypos = find_pos_in_array(vals=regrid_y, val=yval)
myxpos2 = myxpos + 1
myypos2 = myypos + 1
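# Bilinear interpolation - interpolate in x along the two bounding rows
# (newval1 and newval2) and then between these in y to give newval3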
if (myxpos2 != myxpos):
alpha = (xnew[i] - regrid_x[myxpos]) / \
(regrid_x[myxpos2] - regrid_x[myxpos])
else:
alpha = (xnew[i] - regrid_x[myxpos]) / 1E-30
newval1 = (regrid_f[myypos, myxpos] - regrid_f[myypos, myxpos2])
newval1 = newval1 * alpha
newval1 = regrid_f[myypos, myxpos] - newval1
newval2 = (regrid_f[myypos2, myxpos] - regrid_f[myypos2, myxpos2])
newval2 = newval2 * alpha
newval2 = regrid_f[myypos2, myxpos] - newval2
if (myypos2 != myypos):
alpha2 = (ynew[i] - regrid_y[myypos])
alpha2 = alpha2 / (regrid_y[myypos2] - regrid_y[myypos])
else:
alpha2 = (ynew[i] - regrid_y[myypos]) / 1E-30
newval3 = newval1 - (newval1 - newval2) * alpha2
fieldout = np.append(fieldout, newval3)
return fieldout
def stipple(f=None, x=None, y=None, min=None, max=None,
size=80, color='k', pts=50, marker='.', edgecolors='k',
alpha=1.0, ylog=False, zorder=1):
"""
| stipple - put markers on a plot to indicate value of interest
|
| f=None - cf field or field
| x=None - x points for field
| y=None - y points for field
| min=None - minimum threshold for stipple
| max=None - maximum threshold for stipple
| size=80 - default size for stipples
| color='k' - default colour for stipples
| pts=50 - number of points in the x direction
| marker='.' - default marker for stipples
| edgecolors='k' - outline colour
| alpha=1.0 - transparency setting - default is off
| ylog=False - set to True if a log pressure stipple plot
| is required
| zorder=1 - plotting order
|
|
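| A minimal usage sketch (assuming g is a 2D longitude-latitude cf field
| that has already been contoured with cfp.con):
| cfp.stipple(f=g, min=280, max=290, size=60, color='b')
|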
:Returns:
None
|
|
"""
if plotvars.plot_type not in [1, 2, 3]:
errstr = '\n stipple error - only X-Y, X-Z and Y-Z \n'
errstr = errstr + 'stipple supported at the present time\n'
errstr = errstr + 'Please raise a feature request if you see this error.\n'
raise Warning(errstr)
# Extract required data for contouring
# If a cf-python field
if isinstance(f, cf.Field):
colorbar_title = ''
field, xpts, ypts, ptype, colorbar_title, xlabel, ylabel, xpole, \
ypole = cf_data_assign(f, colorbar_title)
elif isinstance(f, cf.FieldList):
raise TypeError("Can't plot a field list")
else:
field = f # field data passed in as f
check_data(field, x, y)
xpts = x
ypts = y
if plotvars.plot_type == 1:
# Cylindrical projection
# Add cyclic information if missing.
lonrange = np.nanmax(xpts) - np.nanmin(xpts)
if lonrange < 360:
# field, xpts = cartopy_util.add_cyclic_point(field, xpts)
field, xpts = add_cyclic(field, xpts)
if plotvars.proj == 'cyl':
# Calculate interpolation points
xnew, ynew = stipple_points(xmin=np.nanmin(xpts),
xmax=np.nanmax(xpts),
ymin=np.nanmin(ypts),
ymax=np.nanmax(ypts),
pts=pts, stype=2)
# Calculate points in map space
xnew_map = xnew
ynew_map = ynew
if plotvars.proj == 'npstere' or plotvars.proj == 'spstere':
# Calculate interpolation points
xnew, ynew, xnew_map, ynew_map = polar_regular_grid()
# Convert longitudes to be 0 to 360
# negative longitudes are incorrectly regridded in polar stereographic projection
xnew = np.mod(xnew + 360.0, 360.0)
if plotvars.plot_type >= 2 and plotvars.plot_type <= 3:
# Flip data if a lat-height plot and lats start at the north pole
if plotvars.plot_type == 2:
if xpts[0] > xpts[-1]:
xpts = xpts[::-1]
field = np.fliplr(field)
# Calculate interpolation points
ymin = np.nanmin(ypts)
ymax = np.nanmax(ypts)
if ylog:
ymin = np.log10(ymin)
ymax = np.log10(ymax)
xnew, ynew = stipple_points(xmin=np.nanmin(xpts),
xmax=np.nanmax(xpts),
ymin=ymin,
ymax=ymax,
pts=pts, stype=2)
if ylog:
ynew = 10**ynew
# Get values at the new points
vals = regrid(f=field, x=xpts, y=ypts, xnew=xnew, ynew=ynew)
# Work out which of the points are valid
valid_points = np.array([], dtype='int64')
for i in np.arange(np.size(vals)):
if vals[i] >= min and vals[i] <= max:
valid_points = np.append(valid_points, i)
if plotvars.plot_type == 1:
proj = ccrs.PlateCarree()
if np.size(valid_points) > 0:
plotvars.mymap.scatter(xnew[valid_points], ynew[valid_points],
s=size, c=color, marker=marker,
edgecolors=edgecolors,
alpha=alpha, transform=proj, zorder=zorder)
if plotvars.plot_type >= 2 and plotvars.plot_type <= 3:
plotvars.plot.scatter(xnew[valid_points], ynew[valid_points],
s=size, c=color, marker=marker,
edgecolors=edgecolors,
alpha=alpha, zorder=zorder)
def stipple_points(xmin=None, xmax=None, ymin=None,
ymax=None, pts=None, stype=None):
"""
| stipple_points - calculate interpolation points
|
| xmin=None - plot x minimum
| xmax=None - plot x maximum
| ymin=None - plot y minimum
| ymax=None - plot y maximum
| pts=None - number of points in the x and y directions
| one number gives the same in both directions
|
| stype=None - type of grid. 1=regular, 2=offset
|
|
|
:Returns:
stipple locations in x and y
|
|
"""
# Work out number of points in x and y directions
if np.size(pts) == 1:
pts_x = pts
pts_y = pts
if np.size(pts) == 2:
pts_x = pts[0]
pts_y = pts[1]
# Create regularly spaced points
xstep = (xmax - xmin) / float(pts_x)
x1 = [xmin + xstep / 4]
while (np.nanmax(x1) + xstep) < xmax - xstep / 10:
x1 = np.append(x1, np.nanmax(x1) + xstep)
x2 = [xmin + xstep * 3 / 4]
while (np.nanmax(x2) + xstep) < xmax - xstep / 10:
x2 = np.append(x2, np.nanmax(x2) + xstep)
ystep = (ymax - ymin) / float(pts_y)
y1 = [ymin + ystep / 2]
while (np.nanmax(y1) + ystep) < ymax - ystep / 10:
y1 = np.append(y1, np.nanmax(y1) + ystep)
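# stype=1 gives a regular grid of points, stype=2 offsets alternate rows
# by half a step to give a staggered distribution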
# Create interpolation points
xnew = []
ynew = []
iy = 0
for y in y1:
iy = iy + 1
if stype == 1:
xnew = np.append(xnew, x1)
y2 = np.zeros(np.size(x1))
y2.fill(y)
ynew = np.append(ynew, y2)
if stype == 2:
if iy % 2 == 0:
xnew = np.append(xnew, x1)
y2 = np.zeros(np.size(x1))
y2.fill(y)
ynew = np.append(ynew, y2)
if iy % 2 == 1:
xnew = np.append(xnew, x2)
y2 = np.zeros(np.size(x2))
y2.fill(y)
ynew = np.append(ynew, y2)
return xnew, ynew
def find_pos_in_array(vals=None, val=None, above=False):
"""
| find_pos_in_array - find the position of a point in an array
|
| vals - array values
| val - value to find position of
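| above=False - if True return the position of the first value in vals
| above val rather than the position of the last value below it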
|
|
|
|
|
|
:Returns:
position in array
|
|
|
"""
pos = -1
if above is False:
for myval in vals:
if val > myval:
pos = pos + 1
if above:
for myval in vals:
if val >= myval:
pos = pos + 1
if np.size(vals) - 1 > pos:
pos = pos + 1
return pos
def vect(u=None, v=None, x=None, y=None, scale=None, stride=None, pts=None,
key_length=None, key_label=None, ptype=None, title=None, magmin=None,
width=0.02, headwidth=3, headlength=5, headaxislength=4.5,
pivot='middle', key_location=[0.95, -0.06], key_show=True, axes=True,
xaxis=True, yaxis=True, xticks=None, xticklabels=None, yticks=None,
yticklabels=None, xlabel=None, ylabel=None, ylog=False, color='k',
zorder=1):
"""
| vect - plot vectors
|
| u=None - u wind
| v=None - v wind
| x=None - x locations of u and v
| y=None - y locations of u and v
| scale=None - data units per arrow length unit. A smaller value gives
| a larger vector. Generally takes one value but in the case
| of two supplied values the second vector scaling applies to
| the v field.
| stride=None - plot vector every stride points. Can take two values one
| for x and one for y.
| pts=None - use bilinear interpolation to interpolate vectors onto a new
| grid - takes one or two values.
| If one value is passed then this is used for both the x and
| y axes.
| magmin=None - don't plot any vects with less than this magnitude.
| key_length=None - length of the key. Generally takes one value but in
| the case of two supplied values the second length applies to the v field.
| key_label=None - label for the key. Generally takes one value but in the
| case of two supplied values the second label applies to the v field.
| key_location=[0.95, -0.06] - location of the vector key relative to the
| plot in normalised coordinates.
| key_show=True - draw the key. Set to False if not required.
| ptype=None - plot type - not needed for cf fields.
| 0 = no specific plot type,
| 1 = longitude-latitude,
| 2 = latitude - height,
| 3 = longitude - height,
| 4 = latitude - time,
| 5 = longitude - time
| 6 = rotated pole
|
| title=None - plot title
| width=0.02 - shaft width of the vector arrows in inches
| headwidth=3 - head width as multiple of shaft width, default is 3
| headlength=5 - head length as multiple of shaft width, default is 5
| headaxislength=4.5 - head length at shaft intersection, default is 4.5
| pivot='middle' - the part of the arrow that is at the grid point; the
| arrow rotates about this point
| takes 'tail', 'middle', 'tip'
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
| xticks=None - xtick positions
| xticklabels=None - xtick labels
| yticks=None - y tick positions
| yticklabels=None - ytick labels
| xlabel=None - label for x axis
| ylabel=None - label for y axis
| ylog=False - log y axis
| color='k' - colour for the vectors - default is black.
| zorder=1 - plotting order
|
:Returns:
None
|
|
|
"""
# If the vector colour is white set the quiver key colour to black
# so that it can be seen
qkey_color = color
if qkey_color == 'w' or qkey_color == 'white':
qkey_color = 'k'
colorbar_title = ''
text_fontsize = plotvars.text_fontsize
continent_thickness = plotvars.continent_thickness
continent_color = plotvars.continent_color
if text_fontsize is None:
text_fontsize = 11
if continent_thickness is None:
continent_thickness = 1.5
if continent_color is None:
continent_color = 'k'
# ylog=plotvars.ylog
title_fontsize = plotvars.title_fontsize
title_fontweight = plotvars.title_fontweight
if title_fontsize is None:
title_fontsize = 15
resolution_orig = plotvars.resolution
# Set potential user axis labels
user_xlabel = xlabel
user_ylabel = ylabel
rotated_vect = False
if isinstance(u, cf.Field):
if u.ref('rotated_latitude_longitude', default=False):
rotated_vect = True
# Extract required data
# If a cf-python field
if isinstance(u, cf.Field):
# Check data is 2D
ndims = np.squeeze(u.data).ndim
if ndims != 2:
errstr = "\n\ncfp.vect error need a 2 dimensonal u field to make vectors\n"
errstr += "received " + str(np.squeeze(u.data).ndim)
if ndims == 1:
errstr += " dimension\n\n"
else:
errstr += " dimensions\n\n"
raise TypeError(errstr)
u_data, u_x, u_y, ptype, colorbar_title, xlabel, ylabel, xpole, \
ypole = cf_data_assign(u, colorbar_title, rotated_vect=rotated_vect)
elif isinstance(u, cf.FieldList):
raise TypeError("Can't plot a field list")
else:
# field=f #field data passed in as f
check_data(u, x, y)
u_data = deepcopy(u)
u_x = deepcopy(x)
u_y = deepcopy(y)
xlabel = ''
ylabel = ''
if isinstance(v, cf.Field):
# Check data is 2D
ndims = np.squeeze(v.data).ndim
if ndims != 2:
errstr = "\n\ncfp.vect error need a 2 dimensonal v field to make vectors\n"
errstr += "received " + str(np.squeeze(v.data).ndim)
if ndims == 1:
errstr += " dimension\n\n"
else:
errstr += " dimensions\n\n"
raise TypeError(errstr)
v_data, v_x, v_y, ptype, colorbar_title, xlabel, ylabel, xpole, \
ypole = cf_data_assign(v, colorbar_title, rotated_vect=rotated_vect)
elif isinstance(v, cf.FieldList):
raise TypeError("Can't plot a field list")
else:
# field=f #field data passed in as f
check_data(v, x, y)
v_data = deepcopy(v)
v_x = deepcopy(x)
xlabel = ''
ylabel = ''
# If a minimum magnitude is specified mask these data points
if magmin is not None:
mag = np.sqrt(u_data**2 + v_data**2)
invalid = np.where(mag <= magmin)
if np.size(invalid) > 0:
u_data[invalid] = np.nan
v_data[invalid] = np.nan
# Reset xlabel and ylabel values with user defined labels if specified
if user_xlabel is not None:
xlabel = user_xlabel
if user_ylabel is not None:
ylabel = user_ylabel
# Retrieve any user defined axis labels
if xlabel == '' and plotvars.xlabel is not None:
xlabel = plotvars.xlabel
if ylabel == '' and plotvars.ylabel is not None:
ylabel = plotvars.ylabel
if xticks is None and plotvars.xticks is not None:
xticks = plotvars.xticks
if plotvars.xticklabels is not None:
xticklabels = plotvars.xticklabels
else:
xticklabels = list(map(str, xticks))
if yticks is None and plotvars.yticks is not None:
yticks = plotvars.yticks
if plotvars.yticklabels is not None:
yticklabels = plotvars.yticklabels
else:
yticklabels = list(map(str, yticks))
if scale is None:
scale = np.nanmax(u_data) / 4.0
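# No scale supplied - pick a default based on the maximum u value so that
# the vectors are a reasonable length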
if key_length is None:
key_length = scale
# Open a new plot if necessary
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Set plot type if user specified
if (ptype is not None):
plotvars.plot_type = ptype
lonrange = np.nanmax(u_x) - np.nanmin(u_x)
latrange = np.nanmax(u_y) - np.nanmin(u_y)
if plotvars.plot_type == 1:
# Set up mapping
if (lonrange > 350 and latrange > 170) or plotvars.user_mapset == 1:
set_map()
else:
mapset(lonmin=np.nanmin(u_x), lonmax=np.nanmax(u_x),
latmin=np.nanmin(u_y), latmax=np.nanmax(u_y),
user_mapset=0, resolution=resolution_orig)
set_map()
mymap = plotvars.mymap
# u_data, u_x = cartopy_util.add_cyclic_point(u_data, u_x)
u_data, u_x = add_cyclic(u_data, u_x)
# v_data, v_x = cartopy_util.add_cyclic_point(v_data, v_x)
v_data, v_x = add_cyclic(v_data, v_x)
# stride data points to reduce vector density
if stride is not None:
if np.size(stride) == 1:
xstride = stride
ystride = stride
if np.size(stride) == 2:
xstride = stride[0]
ystride = stride[1]
u_x = u_x[0::xstride]
u_y = u_y[0::ystride]
u_data = u_data[0::ystride, 0::xstride]
v_data = v_data[0::ystride, 0::xstride]
# Map vectors
if plotvars.plot_type == 1:
lonmax = plotvars.lonmax
proj = ccrs.PlateCarree()
if pts is None:
quiv = plotvars.mymap.quiver(u_x, u_y, u_data, v_data, scale=scale,
pivot=pivot, units='inches',
width=width, headwidth=headwidth,
headlength=headlength,
headaxislength=headaxislength,
color=color, transform=proj,
zorder=zorder)
else:
if plotvars.proj == 'cyl':
# **cartopy 0.16 fix for longitude points in cylindrical projection
# when regridding to a number of points
# Shift points to lie within the plotting region
for pt in np.arange(np.size(u_x)):
if u_x[pt] > lonmax:
u_x[pt] = u_x[pt]-360
quiv = plotvars.mymap.quiver(u_x, u_y, u_data, v_data, scale=scale,
pivot=pivot, units='inches',
width=width, headwidth=headwidth,
headlength=headlength,
headaxislength=headaxislength,
color=color,
regrid_shape=pts, transform=proj,
zorder=zorder)
# Make key_label if none exists
if key_label is None:
key_label = str(key_length)
if isinstance(u, cf.Field):
key_label = supscr(key_label + u.units)
if key_show:
plotvars.mymap.quiverkey(quiv, key_location[0],
key_location[1],
key_length,
key_label, labelpos='W',
color=qkey_color,
fontproperties={'size': str(plotvars.axis_label_fontsize)},
coordinates='axes')
# axes
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=False)
# Coastlines
continent_thickness = plotvars.continent_thickness
continent_color = plotvars.continent_color
continent_linestyle = plotvars.continent_linestyle
if continent_thickness is None:
continent_thickness = 1.5
if continent_color is None:
continent_color = 'k'
if continent_linestyle is None:
continent_linestyle = 'solid'
feature = cfeature.NaturalEarthFeature(name='land', category='physical',
scale=plotvars.resolution,
facecolor='none')
mymap.add_feature(feature, edgecolor=continent_color,
linewidth=continent_thickness,
linestyle=continent_linestyle)
# Title
if title is not None:
map_title(title)
if plotvars.plot_type == 6:
if u.ref('rotated_latitude_longitude', False):
proj = ccrs.PlateCarree()
# Set up mapping
if (lonrange > 350 and latrange > 170) or plotvars.user_mapset == 1:
set_map()
else:
mapset(lonmin=np.nanmin(u_x), lonmax=np.nanmax(u_x),
latmin=np.nanmin(u_y), latmax=np.nanmax(u_y),
user_mapset=0, resolution=resolution_orig)
set_map()
quiv = plotvars.mymap.quiver(u_x, u_y, u_data, v_data, scale=scale*10, transform=proj,
pivot=pivot, units='inches',
width=width, headwidth=headwidth,
headlength=headlength,
headaxislength=headaxislength,
color=color, zorder=zorder)
# Make key_label if none exists
if key_label is None:
key_label = str(key_length)
if isinstance(u, cf.Field):
key_label = supscr(key_label + u.units)
if key_show:
plotvars.mymap.quiverkey(quiv, key_location[0],
key_location[1],
key_length,
key_label, labelpos='W',
color=qkey_color,
fontproperties={'size': str(plotvars.axis_label_fontsize)},
coordinates='axes')
# Axes on the native grid
if plotvars.plot == 'rotated':
rgaxes(xpole=xpole, ypole=ypole, xvec=x, yvec=y,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
axes=axes, xaxis=xaxis, yaxis=yaxis,
xlabel=xlabel, ylabel=ylabel)
if plotvars.plot == 'cyl':
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=False)
# Title
if title is not None:
map_title(title)
######################################
# Latitude or longitude vs height plot
######################################
if plotvars.plot_type == 2 or plotvars.plot_type == 3:
user_gset = plotvars.user_gset
if user_gset == 0:
# Program selected data plot limits
xmin = np.nanmin(u_x)
xmax = np.nanmax(u_x)
if plotvars.plot_type == 2:
if xmin < -80 and xmin >= -90:
xmin = -90
if xmax > 80 and xmax <= 90:
xmax = 90
ymin = np.nanmin(u_y)
if ymin <= 10:
ymin = 0
ymax = np.nanmax(u_y)
else:
# User specified plot limits
xmin = plotvars.xmin
xmax = plotvars.xmax
if plotvars.ymin < plotvars.ymax:
ymin = plotvars.ymin
ymax = plotvars.ymax
else:
ymin = plotvars.ymax
ymax = plotvars.ymin
ystep = None
if (ymax == 1000):
ystep = 100
if (ymax == 100000):
ystep = 10000
ytype = 0 # pressure or similar y axis
if 'theta' in ylabel.split(' '):
ytype = 1
if 'height' in ylabel.split(' '):
ytype = 1
ystep = 100
if (ymax - ymin) > 5000:
ystep = 500.0
if (ymax - ymin) > 10000:
ystep = 1000.0
if (ymax - ymin) > 50000:
ystep = 10000.0
# Set plot limits and draw axes
if ylog != 1:
if ytype == 1:
gset(
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
user_gset=user_gset)
else:
gset(
xmin=xmin,
xmax=xmax,
ymin=ymax,
ymax=ymin,
user_gset=user_gset)
# Set default x-axis labels
lltype = 1
if plotvars.plot_type == 2:
lltype = 2
llticks, lllabels = mapaxis(min=xmin, max=xmax, type=lltype)
heightticks = gvals(
dmin=ymin,
dmax=ymax,
mystep=ystep,
mod=False)[0]
heightlabels = heightticks
if axes:
if xaxis:
if xticks is not None:
llticks = xticks
lllabels = xticks
if xticklabels is not None:
lllabels = xticklabels
else:
llticks = [100000000]
xlabel = ''
if yaxis:
if yticks is not None:
heightticks = yticks
heightlabels = yticks
if yticklabels is not None:
heightlabels = yticklabels
else:
heightticks = [100000000]
ylabel = ''
else:
llticks = [100000000]
heightticks = [100000000]
xlabel = ''
ylabel = ''
axes_plot(xticks=llticks, xticklabels=lllabels,
yticks=heightticks, yticklabels=heightlabels,
xlabel=xlabel, ylabel=ylabel)
# Log y axis
if ylog:
if ymin == 0:
ymin = 1 # reset zero mb/height input to a small value
gset(xmin=xmin,
xmax=xmax,
ymin=ymax,
ymax=ymin,
ylog=1,
user_gset=user_gset)
llticks, lllabels = mapaxis(min=xmin,
max=xmax,
type=plotvars.plot_type)
if axes:
if xaxis:
if xticks is not None:
llticks = xticks
lllabels = xticks
if xticklabels is not None:
lllabels = xticklabels
else:
llticks = [100000000]
xlabel = ''
if yaxis:
if yticks is not None:
heightticks = yticks
heightlabels = yticks
if yticklabels is not None:
heightlabels = yticklabels
else:
heightticks = [100000000]
ylabel = ''
if yticks is None:
axes_plot(
xticks=llticks,
xticklabels=lllabels,
xlabel=xlabel,
ylabel=ylabel)
else:
axes_plot(xticks=llticks, xticklabels=lllabels,
yticks=heightticks, yticklabels=heightlabels,
xlabel=xlabel, ylabel=ylabel)
# Regrid the data if requested
if pts is not None:
xnew, ynew = stipple_points(xmin=np.min(u_x), xmax=np.max(u_x),
ymin=np.min(u_y), ymax=np.max(u_y),
pts=pts, stype=1)
if ytype == 0:
# Make y interpolation in log space as we have a pressure coordinate
u_vals = regrid(f=u_data, x=u_x, y=np.log10(u_y), xnew=xnew, ynew=np.log10(ynew))
v_vals = regrid(f=v_data, x=u_x, y=np.log10(u_y), xnew=xnew, ynew=np.log10(ynew))
else:
u_vals = regrid(f=u_data, x=u_x, y=u_y, xnew=xnew, ynew=ynew)
v_vals = regrid(f=v_data, x=u_x, y=u_y, xnew=xnew, ynew=ynew)
u_x = xnew
u_y = ynew
u_data = u_vals
v_data = v_vals
# set scale and key lengths
if np.size(scale) == 1:
scale_u = scale
scale_v = scale
else:
scale_u = scale[0]
scale_v = scale[1]
if np.size(key_length) == 2:
key_length_u = key_length[0]
key_length_v = key_length[1]
# scale v data
v_data = v_data * scale_u / scale_v
else:
key_length_u = key_length
# Plot the vectors
quiv = plotvars.plot.quiver(u_x, u_y, u_data, v_data, pivot=pivot,
units='inches', scale=scale_u,
width=width, headwidth=headwidth,
headlength=headlength,
headaxislength=headaxislength,
color=color, zorder=zorder)
# Plot single key
if np.size(scale) == 1:
# Single scale vector
if key_label is None:
key_label_u = str(key_length_u)
if isinstance(u, cf.Field):
key_label_u = supscr(key_label_u + ' (' + u.units + ')')
else:
key_label_u = key_label[0]
if key_show:
plotvars.plot.quiverkey(quiv, key_location[0],
key_location[1],
key_length_u, key_label_u,
labelpos='W',
color=qkey_color,
fontproperties={'size': str(plotvars.axis_label_fontsize)})
# Plot two keys
if np.size(scale) == 2:
# translate from normalised units to plot units
xpos = key_location[0] * \
(plotvars.xmax - plotvars.xmin) + plotvars.xmin
ypos = key_location[1] * \
(plotvars.ymax - plotvars.ymin) + plotvars.ymin
# horizontal and vertical spacings for offsetting vector reference
# text
xoffset = 0.01 * abs(plotvars.xmax - plotvars.xmin)
yoffset = 0.01 * abs(plotvars.ymax - plotvars.ymin)
# Assign key labels if necessary
if key_label is None:
key_label_u = str(key_length_u)
key_label_v = str(key_length_v)
if isinstance(u, cf.Field):
key_label_u = supscr(key_label_u + ' (' + u.units + ')')
if isinstance(v, cf.Field):
key_label_v = supscr(key_label_v + ' (' + v.units + ')')
else:
key_label_u = supscr(key_label[0])
key_label_v = supscr(key_label[1])
# Plot reference vectors and keys
if key_show:
plotvars.plot.quiver(xpos, ypos, key_length[0], 0,
pivot='tail', units='inches',
scale=scale[0],
headaxislength=headaxislength,
width=width, headwidth=headwidth,
headlength=headlength,
clip_on=False,
color=qkey_color)
plotvars.plot.quiver(xpos, ypos, 0, key_length[1],
pivot='tail', units='inches',
scale=scale[1],
headaxislength=headaxislength,
width=width, headwidth=headwidth,
headlength=headlength,
clip_on=False,
color=qkey_color)
plotvars.plot.text(xpos,
ypos + yoffset,
key_label_u,
horizontalalignment='left',
verticalalignment='top')
plotvars.plot.text(xpos - xoffset,
ypos,
key_label_v,
horizontalalignment='right',
verticalalignment='bottom')
if title is not None:
plotvars.plot.set_title(title,
y=1.03,
fontsize=plotvars.title_fontsize,
fontweight=title_fontweight)
##########
# Save plot
##########
if plotvars.user_plot == 0:
gset()
cscale()
gclose()
if plotvars.user_mapset == 0:
mapset()
mapset(resolution=resolution_orig)
def set_map():
"""
| set_map - set map and write into plotvars.mymap
|
| No inputs
| This is an internal routine and not used by the user
|
|
|
|
|
:Returns:
None
|
|
|
"""
# Set up mapping
extent = True
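# extent is set to False later for global projections such as Mollweide,
# Orthographic and TransverseMercator so that set_extent is not called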
lon_mid = plotvars.lonmin + (plotvars.lonmax - plotvars.lonmin) / 2.0
lonmin = plotvars.lonmin
lonmax = plotvars.lonmax
latmin = plotvars.latmin
latmax = plotvars.latmax
if plotvars.proj == 'cyl':
proj = ccrs.PlateCarree(central_longitude=lon_mid)
# Cartopy line plotting and identical left == right fix
if lonmax - lonmin == 360.0:
lonmax = lonmax + 0.01
if plotvars.proj == 'merc':
min_latitude = -80.0
if plotvars.latmin > min_latitude:
min_latitude = plotvars.latmin
max_latitude = 84.0
if plotvars.latmax < max_latitude:
max_latitude = plotvars.latmax
proj = ccrs.Mercator(central_longitude=plotvars.lon_0,
min_latitude=min_latitude,
max_latitude=max_latitude)
if plotvars.proj == 'npstere':
proj = ccrs.NorthPolarStereo(central_longitude=plotvars.lon_0)
# **cartopy 0.16 fix
# Here we add in 0.01 to the longitude extent as this helps with plotting
# lines and line labels
lonmin = plotvars.lon_0-180
lonmax = plotvars.lon_0+180.01
latmin = plotvars.boundinglat
latmax = 90
if plotvars.proj == 'spstere':
proj = ccrs.SouthPolarStereo(central_longitude=plotvars.lon_0)
# **cartopy 0.16 fix
# Here we add in 0.01 to the longitude extent as this helps with plotting
# lines and line labels
lonmin = plotvars.lon_0-180
lonmax = plotvars.lon_0+180.01
latmin = -90
latmax = plotvars.boundinglat
if plotvars.proj == 'ortho':
proj = ccrs.Orthographic(central_longitude=plotvars.lon_0,
central_latitude=plotvars.lat_0)
lonmin = plotvars.lon_0-180.0
lonmax = plotvars.lon_0+180.01
extent = False
if plotvars.proj == 'moll':
proj = ccrs.Mollweide(central_longitude=plotvars.lon_0)
lonmin = plotvars.lon_0-180.0
lonmax = plotvars.lon_0+180.01
extent = False
if plotvars.proj == 'robin':
proj = ccrs.Robinson(central_longitude=plotvars.lon_0)
if plotvars.proj == 'lcc':
latmin = plotvars.latmin
latmax = plotvars.latmax
lonmin = plotvars.lonmin
lonmax = plotvars.lonmax
lon_0 = lonmin+(lonmax-lonmin)/2.0
lat_0 = latmin+(latmax-latmin)/2.0
cutoff = -40
if lat_0 <= 0:
cutoff = 40
standard_parallels = [33, 45]
if latmin <= 0 and latmax <= 0:
standard_parallels = [-45, -33]
proj = ccrs.LambertConformal(central_longitude=lon_0,
central_latitude=lat_0,
cutoff=cutoff, standard_parallels=standard_parallels)
if plotvars.proj == 'rotated':
proj = ccrs.PlateCarree(central_longitude=lon_mid)
if plotvars.proj == 'OSGB':
proj = ccrs.OSGB()
if plotvars.proj == 'EuroPP':
proj = ccrs.EuroPP()
if plotvars.proj == 'UKCP':
# Special case of TransverseMercator for UKCP
proj = ccrs.TransverseMercator()
if plotvars.proj == 'TransverseMercator':
proj = ccrs.TransverseMercator()
lonmin = plotvars.lon_0-180.0
lonmax = plotvars.lon_0+180.01
extent = False
# Add a plot containing the projection
if plotvars.plot_xmin:
delta_x = plotvars.plot_xmax - plotvars.plot_xmin
delta_y = plotvars.plot_ymax - plotvars.plot_ymin
mymap = plotvars.master_plot.add_axes([plotvars.plot_xmin,
plotvars.plot_ymin,
delta_x, delta_y],
projection=proj)
else:
mymap = plotvars.master_plot.add_subplot(plotvars.rows,
plotvars.columns,
plotvars.pos,
projection=proj)
# Set map extent
set_extent = True
if plotvars.proj in ['OSGB', 'EuroPP', 'UKCP', 'robin', 'lcc']:
set_extent = False
if extent and set_extent:
mymap.set_extent([lonmin, lonmax, latmin, latmax], crs=ccrs.PlateCarree())
# Set the scaling for PlateCarree
if plotvars.proj == 'cyl':
mymap.set_aspect(plotvars.aspect)
if plotvars.proj == 'lcc':
# Special case of lcc
mymap.set_extent([lonmin, lonmax, latmin, latmax], crs=ccrs.PlateCarree())
if plotvars.proj == 'UKCP':
# Special case of TransverseMercator for UKCP
mymap.set_extent([-11, 3, 49, 61], crs=ccrs.PlateCarree())
if plotvars.proj == 'EuroPP':
# EuroPP somehow needs some limits setting.
mymap.set_extent([-12, 25, 30, 75], crs=ccrs.PlateCarree())
# Remove any plotvars.plot axes leaving just the plotvars.mymap axes
plotvars.plot.set_frame_on(False)
plotvars.plot.set_xticks([])
plotvars.plot.set_yticks([])
# Store map
plotvars.mymap = mymap
def polar_regular_grid(pts=50):
"""
| polar_regular_grid - return a regular grid over a polar
| stereographic area
|
| pts=50 - number of grid points in the x and y directions
|
|
|
|
|
|
:Returns:
lons, lats of grid in degrees
x, y locations of lons and lats
|
|
|
"""
boundinglat = plotvars.boundinglat
lon_0 = plotvars.lon_0
if plotvars.proj == 'npstere':
thisproj = ccrs.NorthPolarStereo(central_longitude=lon_0)
else:
thisproj = ccrs.SouthPolarStereo(central_longitude=lon_0)
# Find min and max of plotting region in device coordinates
lons = np.array([lon_0-90, lon_0, lon_0+90, lon_0+180])
lats = np.array([boundinglat, boundinglat, boundinglat, boundinglat])
extent = thisproj.transform_points(ccrs.PlateCarree(), lons, lats)
xmin = np.min(extent[:, 0])
xmax = np.max(extent[:, 0])
ymin = np.min(extent[:, 1])
ymax = np.max(extent[:, 1])
# Make up a stipple of points to cover the pole
points_device = stipple_points(
xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, pts=pts, stype=2)
xnew = np.array(points_device)[0, :]
ynew = np.array(points_device)[1, :]
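# Convert the regular grid of projected points back to longitudes and latitudes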
points_polar = ccrs.PlateCarree().transform_points(thisproj, xnew, ynew)
lons = np.array(points_polar)[:, 0]
lats = np.array(points_polar)[:, 1]
if plotvars.proj == 'npstere':
valid = np.where(lats >= boundinglat)
else:
valid = np.where(lats <= boundinglat)
return lons[valid], lats[valid], xnew[valid], ynew[valid]
def cf_var_name(field=None, dim=None):
"""
| cf_var_name - return the name from a supplied dimension
| in the following preference order:
| standard_name
| long_name
| short_name
| ncvar
|
| field=None - field
| dim=None - dimension required - 'dim0', 'dim1' etc.
|
|
|
|
|
:Returns:
name
|
|
|
"""
id = getattr(field.construct(dim), 'id', False)
ncvar = field.construct(dim).nc_get_variable(False)
short_name = getattr(field.construct(dim), 'short_name', False)
long_name = getattr(field.construct(dim), 'long_name', False)
standard_name = getattr(field.construct(dim), 'standard_name', False)
name = 'No Name'
if id:
name = id
if ncvar:
name = ncvar
if short_name:
name = short_name
if long_name:
name = long_name
if standard_name:
name = standard_name
return name
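# Illustrative sketch of cf_var_name - 'dim0' is just an example construct
# key, as in the docstring above; the data file is the one used by
# regression_tests further down:
#
#     f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
#     print(cf_var_name(field=f, dim='dim0'))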
def cf_var_name_titles(field=None, dim=None):
"""
    | cf_var_name_titles - return the name and units from a supplied dimension
| in the following preference order:
| standard_name
| long_name
| short_name
| ncvar
|
| field=None - field
| dim=None - dimension required - 'dim0', 'dim1' etc.
|
:Returns:
        name, units
"""
name = None
units = None
if field.has_construct(dim):
id = getattr(field.construct(dim), 'id', False)
ncvar = field.construct(dim).nc_get_variable(False)
short_name = getattr(field.construct(dim), 'short_name', False)
long_name = getattr(field.construct(dim), 'long_name', False)
standard_name = getattr(field.construct(dim), 'standard_name', False)
#name = 'No Name'
if id:
name = id
if ncvar:
name = ncvar
if short_name:
name = short_name
if long_name:
name = long_name
if standard_name:
name = standard_name
units = getattr(field.construct(dim), 'units', '()')
if units[0] != '(':
units = '(' + units + ')'
return name, units
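# Illustrative sketch of cf_var_name_titles, assuming f is a cf.Field as in
# the example above:
#
#     name, units = cf_var_name_titles(field=f, dim='dim0')
#     # name follows the standard_name > long_name > short_name > ncvar
#     # preference; units are returned wrapped in parentheses, e.g. '(K)'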
def process_color_scales():
"""
| Process colour scales to generate images of them for the web
| documentation and the rst code for inclusion in the
| colour_scale.rst file.
|
|
| No inputs
| This is an internal routine and not used by the user
|
|
|
|
|
:Returns:
None
|
|
|
"""
# Define scale categories
uniform = ['viridis', 'magma', 'inferno', 'plasma', 'parula', 'gray']
ncl_large = ['amwg256', 'BkBlAqGrYeOrReViWh200', 'BlAqGrYeOrRe',
'BlAqGrYeOrReVi200', 'BlGrYeOrReVi200', 'BlRe', 'BlueRed',
'BlueRedGray', 'BlueWhiteOrangeRed', 'BlueYellowRed',
'BlWhRe', 'cmp_b2r', 'cmp_haxby', 'detail', 'extrema',
'GrayWhiteGray', 'GreenYellow', 'helix', 'helix1',
'hotres', 'matlab_hot', 'matlab_hsv', 'matlab_jet',
'matlab_lines', 'ncl_default', 'ncview_default',
'OceanLakeLandSnow', 'rainbow', 'rainbow_white_gray',
'rainbow_white', 'rainbow_gray', 'tbr_240_300',
'tbr_stdev_0_30', 'tbr_var_0_500', 'tbrAvg1', 'tbrStd1',
'tbrVar1', 'thelix', 'ViBlGrWhYeOrRe', 'wh_bl_gr_ye_re',
'WhBlGrYeRe', 'WhBlReWh', 'WhiteBlue',
'WhiteBlueGreenYellowRed', 'WhiteGreen',
'WhiteYellowOrangeRed', 'WhViBlGrYeOrRe', 'WhViBlGrYeOrReWh',
'wxpEnIR', '3gauss', '3saw', 'BrBG']
ncl_meteoswiss = ['hotcold_18lev', 'hotcolr_19lev', 'mch_default',
'perc2_9lev', 'percent_11lev', 'precip2_15lev',
'precip2_17lev', 'precip3_16lev', 'precip4_11lev',
'precip4_diff_19lev', 'precip_11lev',
'precip_diff_12lev', 'precip_diff_1lev',
'rh_19lev', 'spread_15lev']
ncl_color_blindness = ['StepSeq25', 'posneg_2', 'posneg_1',
'BlueDarkOrange18', 'BlueDarkRed18',
'GreenMagenta16', 'BlueGreen14', 'BrownBlue12',
'Cat12']
ncl_small = ['amwg', 'amwg_blueyellowred', 'BlueDarkRed18',
'BlueDarkOrange18', 'BlueGreen14', 'BrownBlue12', 'Cat12',
'cmp_flux', 'cosam12', 'cosam', 'GHRSST_anomaly',
'GreenMagenta16', 'hotcold_18lev', 'hotcolr_19lev',
'mch_default', 'nrl_sirkes', 'nrl_sirkes_nowhite',
'perc2_9lev', 'percent_11lev', 'posneg_2', 'prcp_1', 'prcp_2',
'prcp_3', 'precip_11lev', 'precip_diff_12lev',
'precip_diff_1lev', 'precip2_15lev', 'precip2_17lev',
'precip3_16lev', 'precip4_11lev', 'precip4_diff_19lev',
'radar', 'radar_1', 'rh_19lev', 'seaice_1', 'seaice_2',
'so4_21', 'spread_15lev', 'StepSeq25', 'sunshine_9lev',
'sunshine_diff_12lev', 'temp_19lev', 'temp_diff_18lev',
'temp_diff_1lev', 'topo_15lev', 'wgne15', 'wind_17lev']
orography = ['os250kmetres', 'wiki_1_0_2', 'wiki_1_0_3',
'wiki_2_0', 'wiki_2_0_reduced', 'arctic']
idl_guide = []
for i in np.arange(1, 45):
idl_guide.append('scale' + str(i))
for category in ['uniform', 'ncl_meteoswiss', 'ncl_small', 'ncl_large',
'ncl_color_blindness', 'orography', 'idl_guide']:
if category == 'uniform':
scales = uniform
div = '================== ====='
chars = 10
title = 'Perceptually uniform colour maps for use with continuous '
title += 'data'
print(title)
            print('-' * len(title))
print('')
print(div)
print('Name Scale')
print(div)
if category == 'ncl_meteoswiss':
scales = ncl_meteoswiss
div = '================== ====='
chars = 19
print('NCAR Command Language - MeteoSwiss colour maps')
print('----------------------------------------------')
print('')
print(div)
print('Name Scale')
print(div)
if category == 'ncl_small':
scales = ncl_small
div = '=================== ====='
chars = 20
print('NCAR Command Language - small color maps (<50 colours)')
print('------------------------------------------------------')
print('')
print(div)
print('Name Scale')
print(div)
if category == 'ncl_large':
scales = ncl_large
div = '======================= ====='
chars = 24
print('NCAR Command Language - large colour maps (>50 colours)')
print('-------------------------------------------------------')
print('')
print(div)
print('Name Scale')
print(div)
if category == 'ncl_color_blindness':
scales = ncl_color_blindness
div = '================ ====='
chars = 17
            title = 'NCAR Command Language - Enhanced to help with colour '
            title += 'blindness'
print(title)
title = '-----------------------------------------------------'
title += '---------'
print(title)
print('')
print(div)
print('Name Scale')
print(div)
chars = 17
if category == 'orography':
scales = orography
div = '================ ====='
chars = 17
print('Orography/bathymetry colour scales')
print('----------------------------------')
print('')
print(div)
print('Name Scale')
print(div)
chars = 17
if category == 'idl_guide':
scales = idl_guide
div = '======= ====='
chars = 8
print('IDL guide scales')
print('----------------')
print('')
print(div)
print('Name Scale')
print(div)
chars = 8
for scale in scales:
# Make image of scale
fig = plot.figure(figsize=(8, 0.5))
ax1 = fig.add_axes([0.05, 0.1, 0.9, 0.2])
cscale(scale)
cmap = matplotlib.colors.ListedColormap(plotvars.cs)
cb1 = matplotlib.colorbar.ColorbarBase(
ax1, cmap=cmap, orientation='horizontal', ticks=None)
cb1.set_ticks([0.0, 1.0])
cb1.set_ticklabels(['', ''])
file = '/home/andy/cf-docs/cfplot_sphinx/images/'
file += 'colour_scales/' + scale + '.png'
plot.savefig(file)
plot.close()
# Use convert to trim the png file to remove white space
subprocess.call(["convert", "-trim", file, file])
name_pad = scale
while len(name_pad) < chars:
name_pad = name_pad + ' '
fn = name_pad + '.. image:: images/colour_scales/' + scale + '.png'
print(fn)
print(div)
print('')
print('')
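# process_color_scales is an internal documentation helper: it writes a png
# swatch of every named colour scale to the hard-coded documentation image
# directory above and prints the matching rst tables to stdout.  A hedged
# sketch of capturing that output when rebuilding the docs (the output file
# name is illustrative only):
#
#     python -c "import cfplot as cfp; cfp.process_color_scales()" > colour_scales.rst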
def reset():
"""
| reset all plotting variables
|
|
|
|
|
|
|
:Returns:
        None
|
|
|
"""
axes()
cscale()
levs()
gset()
mapset()
setvars()
def setvars(file=None, title_fontsize=None, text_fontsize=None,
colorbar_fontsize=None, colorbar_fontweight=None,
axis_label_fontsize=None, title_fontweight=None,
text_fontweight=None, axis_label_fontweight=None, fontweight=None,
continent_thickness=None, continent_color=None,
continent_linestyle=None, viewer=None,
tspace_year=None, tspace_month=None, tspace_day=None,
tspace_hour=None, xtick_label_rotation=None,
xtick_label_align=None, ytick_label_rotation=None,
ytick_label_align=None, legend_text_weight=None,
legend_text_size=None, cs_uniform=None,
master_title=None, master_title_location=None,
master_title_fontsize=None, master_title_fontweight=None,
dpi=None, land_color=None, ocean_color=None,
lake_color=None,
rotated_grid_spacing=None, rotated_deg_spacing=None,
rotated_continents=None, rotated_grid=None,
rotated_labels=None, rotated_grid_thickness=None,
legend_frame=None,
legend_frame_edge_color=None, legend_frame_face_color=None,
degsym=None, axis_width=None, grid=None,
grid_spacing=None,
grid_colour=None, grid_linestyle=None, grid_thickness=None,
tight=None, level_spacing=None):
"""
| setvars - set plotting variables and their defaults
|
| file=None - output file name
| title_fontsize=None - title fontsize, default=15
| title_fontweight='normal' - title fontweight
    | text_fontsize=None - text font size, default=11
| text_fontweight='normal' - text font weight
| axis_label_fontsize=None - axis label fontsize, default=11
| axis_label_fontweight='normal' - axis font weight
| legend_text_size='11' - legend text size
| legend_text_weight='normal' - legend text weight
| colorbar_fontsize='11' - colorbar text size
| colorbar_fontweight='normal' - colorbar font weight
| master_title_fontsize=30 - master title font size
| master_title_fontweight='normal' - master title font weight
| continent_thickness=1.5 - default=1.5
| continent_color='k' - default='k' (black)
    | continent_linestyle='solid' - default='solid'
| viewer='display' - use ImageMagick display program
| 'matplotlib' to use image widget to view the picture
| tspace_year=None - time axis spacing in years
| tspace_month=None - time axis spacing in months
| tspace_day=None - time axis spacing in days
| tspace_hour=None - time axis spacing in hours
| xtick_label_rotation=0 - rotation of xtick labels
| xtick_label_align='center' - alignment of xtick labels
| ytick_label_rotation=0 - rotation of ytick labels
| ytick_label_align='right' - alignment of ytick labels
| cs_uniform=True - make a uniform differential colour scale
| master_title=None - master title text
| master_title_location=[0.5,0.95] - master title location
| dpi=None - dots per inch setting
| land_color=None - land colour
| ocean_color=None - ocean colour
| lake_color=None - lake colour
| rotated_grid_spacing=10 - rotated grid spacing in degrees
| rotated_deg_spacing=0.75 - rotated grid spacing between graticule dots
    | rotated_grid_thickness=1.0 - rotated grid thickness for longitude and latitude lines
| rotated_continents=True - draw rotated continents
| rotated_grid=True - draw rotated grid
| rotated_labels=True - draw rotated grid labels
| legend_frame=True - draw a frame around a lineplot legend
| legend_frame_edge_color='k' - color for the legend frame
| legend_frame_face_color=None - color for the legend background
| degsym=True - add degree symbol to longitude and latitude axis labels
| axis_width=None - width of line for the axes
| grid=True - draw grid
| grid_spacing=1 - grid spacing in degrees
| grid_colour='k' - grid colour
| grid_linestyle='--' - grid line style
| grid_thickness=1.0 - grid thickness
| tight=False - remove whitespace around the plot
| level_spacing=None - default contour level spacing - takes 'linear', 'log', 'loglike',
| 'outlier' and 'inspect'
|
| Use setvars() to reset to the defaults
|
|
|
:Returns:
        None
|
|
|
"""
vals = [file, title_fontsize, text_fontsize, axis_label_fontsize,
continent_thickness, title_fontweight, text_fontweight,
axis_label_fontweight, fontweight, continent_color,
continent_linestyle, tspace_year,
tspace_month, tspace_day, tspace_hour, xtick_label_rotation,
xtick_label_align, ytick_label_rotation, ytick_label_align,
legend_text_size, legend_text_weight, cs_uniform,
master_title, master_title_location,
master_title_fontsize, master_title_fontweight, dpi,
land_color, ocean_color, lake_color, rotated_grid_spacing,
rotated_deg_spacing, rotated_continents, rotated_grid,
rotated_grid_thickness,
rotated_labels, colorbar_fontsize, colorbar_fontweight,
legend_frame, legend_frame_edge_color, legend_frame_face_color,
degsym, axis_width, grid, grid_spacing,
grid_colour, grid_linestyle, grid_thickness, tight, level_spacing]
if all(val is None for val in vals):
plotvars.file = None
plotvars.title_fontsize = 15
plotvars.text_fontsize = 11
plotvars.colorbar_fontsize = 11
plotvars.axis_label_fontsize = 11
plotvars.title_fontweight = 'normal'
plotvars.text_fontweight = 'normal'
plotvars.colorbar_fontweight = 'normal'
plotvars.axis_label_fontweight = 'normal'
plotvars.fontweight = 'normal'
plotvars.continent_thickness = None
plotvars.continent_color = None
plotvars.continent_linestyle = None
plotvars.tspace_year = None
plotvars.tspace_month = None
plotvars.tspace_day = None
plotvars.tspace_hour = None
plotvars.xtick_label_rotation = 0
plotvars.xtick_label_align = 'center'
plotvars.ytick_label_rotation = 0
plotvars.ytick_label_align = 'right'
plotvars.legend_text_size = 11
plotvars.legend_text_weight = 'normal'
plotvars.cs_uniform = True
plotvars.viewer = plotvars.global_viewer
plotvars.master_title = None
plotvars.master_title_location = [0.5, 0.95]
plotvars.master_title_fontsize = 30
plotvars.master_title_fontweight = 'normal'
plotvars.dpi = None
plotvars.land_color = None
plotvars.ocean_color = None
plotvars.lake_color = None
plotvars.rotated_grid_spacing = 10
plotvars.rotated_deg_spacing = 0.75
plotvars.rotated_grid_thickness = 1.0
plotvars.rotated_continents = True
plotvars.rotated_grid = True
plotvars.rotated_labels = True
plotvars.legend_frame = True
plotvars.legend_frame_edge_color = 'k'
plotvars.legend_frame_face_color = None
plotvars.degsym = False
plotvars.axis_width = None
plotvars.grid = True
plotvars.grid_spacing = 1
plotvars.grid_colour = 'k'
plotvars.grid_linestyle = '--'
plotvars.grid_thickness = 1.0
matplotlib.pyplot.ioff()
plotvars.tight = False
plotvars.level_spacing = None
if file is not None:
plotvars.file = file
if title_fontsize is not None:
plotvars.title_fontsize = title_fontsize
if axis_label_fontsize is not None:
plotvars.axis_label_fontsize = axis_label_fontsize
if continent_thickness is not None:
plotvars.continent_thickness = continent_thickness
if continent_color is not None:
plotvars.continent_color = continent_color
if continent_linestyle is not None:
plotvars.continent_linestyle = continent_linestyle
if text_fontsize is not None:
        plotvars.text_fontsize = text_fontsize
if colorbar_fontsize is not None:
plotvars.colorbar_fontsize = colorbar_fontsize
if text_fontweight is not None:
plotvars.text_fontweight = text_fontweight
if axis_label_fontweight is not None:
plotvars.axis_label_fontweight = axis_label_fontweight
if colorbar_fontweight is not None:
plotvars.colorbar_fontweight = colorbar_fontweight
if title_fontweight is not None:
plotvars.title_fontweight = title_fontweight
if viewer is not None:
plotvars.viewer = viewer
if tspace_year is not None:
plotvars.tspace_year = tspace_year
if tspace_month is not None:
plotvars.tspace_month = tspace_month
if tspace_day is not None:
plotvars.tspace_day = tspace_day
if tspace_hour is not None:
plotvars.tspace_hour = tspace_hour
if xtick_label_rotation is not None:
plotvars.xtick_label_rotation = xtick_label_rotation
if xtick_label_align is not None:
plotvars.xtick_label_align = xtick_label_align
if ytick_label_rotation is not None:
plotvars.ytick_label_rotation = ytick_label_rotation
if ytick_label_align is not None:
plotvars.ytick_label_align = ytick_label_align
if legend_text_size is not None:
plotvars.legend_text_size = legend_text_size
if legend_text_weight is not None:
plotvars.legend_text_weight = legend_text_weight
if cs_uniform is not None:
plotvars.cs_uniform = cs_uniform
if master_title is not None:
plotvars.master_title = master_title
if master_title_location is not None:
plotvars.master_title_location = master_title_location
if master_title_fontsize is not None:
plotvars.master_title_fontsize = master_title_fontsize
if master_title_fontweight is not None:
plotvars.master_title_fontweight = master_title_fontweight
if dpi is not None:
plotvars.dpi = dpi
if land_color is not None:
plotvars.land_color = land_color
if ocean_color is not None:
plotvars.ocean_color = ocean_color
if lake_color is not None:
plotvars.lake_color = lake_color
if rotated_grid_spacing is not None:
plotvars.rotated_grid_spacing = rotated_grid_spacing
if rotated_deg_spacing is not None:
plotvars.rotated_deg_spacing = rotated_deg_spacing
if rotated_grid_thickness is not None:
plotvars.rotated_grid_thickness = rotated_grid_thickness
if rotated_continents is not None:
plotvars.rotated_continents = rotated_continents
if rotated_grid is not None:
plotvars.rotated_grid = rotated_grid
if rotated_labels is not None:
plotvars.rotated_labels = rotated_labels
if legend_frame is not None:
plotvars.legend_frame = legend_frame
if legend_frame_edge_color is not None:
plotvars.legend_frame_edge_color = legend_frame_edge_color
if legend_frame_face_color is not None:
plotvars.legend_frame_face_color = legend_frame_face_color
if degsym is not None:
plotvars.degsym = degsym
if axis_width is not None:
plotvars.axis_width = axis_width
if grid is not None:
plotvars.grid = grid
if grid_spacing is not None:
plotvars.grid_spacing = grid_spacing
if grid_colour is not None:
plotvars.grid_colour = grid_colour
if grid_linestyle is not None:
plotvars.grid_linestyle = grid_linestyle
if grid_thickness is not None:
plotvars.grid_thickness = grid_thickness
if tight is not None:
plotvars.tight = tight
if level_spacing is not None:
plotvars.level_spacing = level_spacing
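# Illustrative setvars usage - the file name and values below are arbitrary
# examples, not defaults:
#
#     setvars(file='myplot.png', title_fontsize=18, continent_thickness=2.0)
#     # ... make some plots ...
#     setvars()    # calling with no arguments resets everything to the defaults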
def vloc(xvec=None, yvec=None, lons=None, lats=None):
"""
| vloc is used to locate the positions of a set of points in a vector
|
|
|
| xvec=None - data longitudes
| yvec=None - data latitudes
| lons=None - required longitude positions
| lats=None - required latitude positions
:Returns:
locations of user points in the longitude and latitude points
|
|
|
|
|
|
|
"""
# Check input parameters
if any(val is None for val in [xvec, yvec, lons, lats]):
errstr = '\nvloc error\n'
errstr += 'xvec, yvec, lons, lats all need to be passed to vloc to\n'
errstr += 'generate a set of location points\n'
raise Warning(errstr)
xarr = np.zeros(np.size(lons))
yarr = np.zeros(np.size(lats))
# Convert longitudes to -180 to 180.
for i in np.arange(np.size(xvec)):
xvec[i] = ((xvec[i] + 180) % 360) - 180
for i in np.arange(np.size(lons)):
lons[i] = ((lons[i] + 180) % 360) - 180
# Centre around 180 degrees longitude if needed.
if (max(xvec) > 150):
for i in np.arange(np.size(xvec)):
xvec[i] = (xvec[i] + 360.0) % 360.0
pts = np.where(xvec < 0.0)
xvec[pts] = xvec[pts] + 360.0
for i in np.arange(np.size(lons)):
lons[i] = (lons[i] + 360.0) % 360.0
pts = np.where(lons < 0.0)
lons[pts] = lons[pts] + 360.0
# Find position in array
for i in np.arange(np.size(lons)):
if ((lons[i] < min(xvec)) or (lons[i] > max(xvec))):
xpt = -1
else:
xpts = np.where(lons[i] >= xvec)
xpt = np.nanmax(xpts)
if ((lats[i] < min(yvec)) or (lats[i] > max(yvec))):
ypt = -1
else:
ypts = np.where(lats[i] >= yvec)
ypt = np.nanmax(ypts)
if (xpt >= 0):
xarr[i] = xpt + (lons[i] - xvec[xpt]) / (xvec[xpt + 1] - xvec[xpt])
else:
xarr[i] = None
if (ypt >= 0) and ypt <= np.size(yvec) - 2:
yarr[i] = ypt + (lats[i] - yvec[ypt]) / (yvec[ypt + 1] - yvec[ypt])
else:
yarr[i] = None
return (xarr, yarr)
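# Illustrative vloc sketch on a made-up one degree grid (values are examples
# only).  The returned arrays are fractional indices into xvec/yvec, which is
# how rgaxes below places rotated-grid lines on index-based axes:
#
#     xvec = np.arange(0.0, 360.0, 1.0)
#     yvec = np.arange(-90.0, 91.0, 1.0)
#     lons = np.array([10.5, 200.25])
#     lats = np.array([45.0, -30.5])
#     xpos, ypos = vloc(xvec=xvec, yvec=yvec, lons=lons, lats=lats)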
def rgaxes(xpole=None, ypole=None, xvec=None, yvec=None,
xticks=None, xticklabels=None, yticks=None, yticklabels=None,
axes=None, xaxis=None, yaxis=None, xlabel=None, ylabel=None):
"""
| rgaxes - label rotated grid plots
|
| xpole=None - location of xpole in degrees
| ypole=None - location of ypole in degrees
| xvec=None - location of x grid points
| yvec=None - location of y grid points
|
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
| xticks=None - xtick positions
| xticklabels=None - xtick labels
| yticks=None - y tick positions
| yticklabels=None - ytick labels
| xlabel=None - label for x axis
| ylabel=None - label for y axis
|
:Returns:
        None
|
|
|
|
|
|
"""
spacing = plotvars.rotated_grid_spacing
degspacing = plotvars.rotated_deg_spacing
continents = plotvars.rotated_continents
grid = plotvars.rotated_grid
labels = plotvars.rotated_labels
grid_thickness = plotvars.rotated_grid_thickness
# Invert y array if going from north to south
# Otherwise this gives nans for all output
yvec_orig = yvec
if (yvec[0] > yvec[np.size(yvec) - 1]):
yvec = yvec[::-1]
gset(xmin=0, xmax=np.size(xvec) - 1,
ymin=0, ymax=np.size(yvec) - 1, user_gset=0)
# Set continent thickness and color if not already set
    continent_thickness = 1.5
    continent_color = 'k'
    if plotvars.continent_thickness is not None:
        continent_thickness = plotvars.continent_thickness
    if plotvars.continent_color is not None:
        continent_color = plotvars.continent_color
# Draw continents
if continents:
import cartopy.io.shapereader as shpreader
import shapefile
shpfilename = shpreader.natural_earth(resolution=plotvars.resolution,
category='physical',
name='coastline')
reader = shapefile.Reader(shpfilename)
shapes = [s.points for s in reader.shapes()]
for shape in shapes:
lons, lats = list(zip(*shape))
lons = np.array(lons)
lats = np.array(lats)
rotated_transform = ccrs.RotatedPole(pole_latitude=ypole, pole_longitude=xpole)
points = rotated_transform.transform_points(ccrs.PlateCarree(), lons, lats)
xout = np.array(points)[:, 0]
yout = np.array(points)[:, 1]
xpts, ypts = vloc(lons=xout, lats=yout, xvec=xvec, yvec=yvec)
plotvars.plot.plot(xpts, ypts, linewidth=continent_thickness,
color=continent_color)
if xticks is None:
lons = -180 + np.arange(360 / spacing + 1) * spacing
else:
lons = xticks
if yticks is None:
lats = -90 + np.arange(180 / spacing + 1) * spacing
else:
lats = yticks
# Work out how far from plot to plot the longitude and latitude labels
xlim = plotvars.plot.get_xlim()
spacing_x = (xlim[1] - xlim[0]) / 20
ylim = plotvars.plot.get_ylim()
spacing_y = (ylim[1] - ylim[0]) / 20
spacing = min(spacing_x, spacing_y)
# Draw lines along a longitude
if axes:
if xaxis:
for val in np.arange(np.size(lons)):
ipts = 179.0 / degspacing
lona = np.zeros(int(ipts)) + lons[val]
lata = -90 + np.arange(ipts - 1) * degspacing
rotated_transform = ccrs.RotatedPole(pole_latitude=ypole, pole_longitude=xpole)
points = rotated_transform.transform_points(ccrs.PlateCarree(), lona, lata)
xout = np.array(points)[:, 0]
yout = np.array(points)[:, 1]
xpts, ypts = vloc(lons=xout, lats=yout, xvec=xvec, yvec=yvec)
if grid:
plotvars.plot.plot(xpts, ypts, ':', linewidth=grid_thickness,
color='k')
if labels:
                    # Make a label unless the axis is all NaNs
if (np.size(ypts[5:]) > np.sum(np.isnan(ypts[5:]))):
ymin = np.nanmin(ypts[5:])
loc = np.where(ypts == ymin)[0]
if np.size(loc) > 1:
loc = loc[1]
if loc > 0:
if np.isfinite(xpts[loc]):
line = matplotlib.lines.Line2D(
[xpts[loc], xpts[loc]], [0, -spacing/2], color='k')
plotvars.plot.add_line(line)
line.set_clip_on(False)
fw = plotvars.text_fontweight
if xticklabels is None:
xticklabel = mapaxis(lons[val], lons[val], type=1)[1][0]
else:
xticklabel = xticks[val]
plotvars.plot.text(xpts[loc], -spacing,
xticklabel,
horizontalalignment='center',
verticalalignment='top',
fontsize=plotvars.text_fontsize,
fontweight=fw)
# Draw lines along a latitude
if axes:
if yaxis:
for val in np.arange(np.size(lats)):
ipts = 359.0 / degspacing
lata = np.zeros(int(ipts)) + lats[val]
lona = -180.0 + np.arange(ipts - 1) * degspacing
rotated_transform = ccrs.RotatedPole(pole_latitude=ypole, pole_longitude=xpole)
points = rotated_transform.transform_points(ccrs.PlateCarree(), lona, lata)
xout = np.array(points)[:, 0]
yout = np.array(points)[:, 1]
xpts, ypts = vloc(lons=xout, lats=yout, xvec=xvec, yvec=yvec)
if grid:
plotvars.plot.plot(xpts, ypts, ':', linewidth=grid_thickness,
color='k')
if labels:
                    # Make a label unless the axis is all NaNs
if (np.size(xpts[5:]) > np.sum(np.isnan(xpts[5:]))):
xmin = np.nanmin(xpts[5:])
loc = np.where(xpts == xmin)[0]
if np.size(loc) == 1:
if loc > 0:
if np.isfinite(ypts[loc]):
line = matplotlib.lines.Line2D(
[0, -spacing/2], [ypts[loc], ypts[loc]], color='k')
plotvars.plot.add_line(line)
line.set_clip_on(False)
fw = plotvars.text_fontweight
if yticklabels is None:
yticklabel = mapaxis(lats[val], lats[val], type=2)[1][0]
else:
yticklabel = yticks[val]
plotvars.plot.text(-spacing, ypts[loc],
yticklabel,
horizontalalignment='right',
verticalalignment='center',
fontsize=plotvars.text_fontsize,
fontweight=fw)
# Reset yvec
yvec = yvec_orig
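# Illustrative rgaxes sketch, condensed from example 23 in regression_tests
# below (the data file and pole locations are the ones used there):
#
#     f = cf.read_field('/opt/graphics/cfplot_data/rgp.nc')
#     xvec = f.construct('dim1').array
#     yvec = f.construct('dim0').array
#     gopen()
#     gset(xmin=0, xmax=np.size(xvec) - 1, ymin=0, ymax=np.size(yvec) - 1)
#     con(f.array, np.arange(np.size(xvec)), np.arange(np.size(yvec))[::-1])
#     rgaxes(xpole=160, ypole=30, xvec=xvec, yvec=yvec)
#     gclose()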
def lineplot(f=None, x=None, y=None, fill=True, lines=True, line_labels=True,
title=None, ptype=0, linestyle='-', linewidth=1.0, color=None,
xlog=False, ylog=False, verbose=None, swap_xy=False,
marker=None, markersize=5.0, markeredgecolor='k',
markeredgewidth=0.5, label=None,
legend_location='upper right', xunits=None, yunits=None,
xlabel=None, ylabel=None, xticks=None, yticks=None,
xticklabels=None, yticklabels=None, xname=None, yname=None,
axes=True, xaxis=True, yaxis=True, zorder=None):
"""
| lineplot is the interface to line plotting in cf-plot.
| The minimum use is lineplot(f) where f is a CF field.
| If x and y are passed then an appropriate plot is made allowing
| x vs data and y vs data plots.
| When making a labelled line plot:
| always have a label for each line
| always put the legend location as an option to the last call to lineplot
|
| f - CF data used to make a line plot
| x - x locations of data in y
| y - y locations of data in x
| linestyle='-' - line style
| color=None - line color. Defaults to Matplotlib colour scheme unless specified
| linewidth=1.0 - line width
| marker=None - marker for points along the line
| markersize=5.0 - size of the marker
| markeredgecolor = 'k' - colour of edge around the marker
| markeredgewidth = 0.5 - width of edge around the marker
| xlog=False - log x-axis
| ylog=False - log y-axis
| label=None - line label - label for line
| legend_location='upper right' - default location of legend
| Other options are {'best': 0, 'center': 10, 'center left': 6,
| 'center right': 7, 'lower center': 8,
| 'lower left': 3, 'lower right': 4, 'right': 5,
| 'upper center': 9, 'upper left': 2, 'upper right': 1}
|
| verbose=None - change to 1 to get a verbose listing of what lineplot
| is doing
| zorder=None - plotting order
|
| The following parameters override any CF data defaults:
| title=None - plot title
| xunits=None - x units
| yunits=None - y units
| xlabel=None - x name
| ylabel=None - y name
    | xname=None - deprecated keyword
    | yname=None - deprecated keyword
| xticks=None - x ticks
| xticklabels=None - x tick labels
| yticks=None - y ticks
| yticklabels - y tick labels
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
|
|
| When making a multiple line plot:
| a) set the axis limits with gset before plotting the lines
| b) the last call to lineplot is the one that any of the above
| axis overrides should be placed in.
|
|
"""
if verbose:
print('lineplot - making a line plot')
    # Catch deprecated keywords
if xname is not None or yname is not None:
print('\nlineplot error')
        print('xname and yname are now deprecated keywords')
print('Please use xlabel and ylabel\n')
return
##################
    # Open a new plot if necessary
##################
if plotvars.user_plot == 0:
gopen(user_plot=0)
##################
# Extract required data
# If a cf-python field
##################
cf_field = False
if f is not None:
if isinstance(f, cf.Field):
cf_field = True
# Check data is 1D
ndims = np.squeeze(f.data).ndim
if ndims != 1:
                errstr = "\n\ncfp.lineplot error - need a 1-dimensional field to make a line plot\n"
errstr += "received " + str(np.squeeze(f.data).ndim) + " dimensions\n\n"
raise TypeError(errstr)
if x is not None:
if isinstance(x, cf.Field):
errstr = "\n\ncfp.lineplot error - two or more cf-fields passed for plotting.\n"
errstr += "To plot two cf-fields open a graphics plot with cfp.gopen(), \n"
errstr += "plot the two fields separately with cfp.lineplot and then close\n"
errstr += "the graphics plot with cfp.gclose()\n\n"
raise TypeError(errstr)
elif isinstance(f, cf.FieldList):
errstr = "\n\ncfp.lineplot - cannot plot a field list\n\n"
raise TypeError(errstr)
plot_xlabel = ''
plot_ylabel = ''
xlabel_units = ''
ylabel_units = ''
if cf_field:
# Extract data
if verbose:
print('lineplot - CF field, extracting data')
has_count = 0
for mydim in list(f.dimension_coordinates()):
if np.size(np.squeeze(f.construct(mydim).array)) > 1:
has_count = has_count + 1
x = np.squeeze(f.construct(mydim).array)
# x label
xlabel_units = str(getattr(f.construct(mydim), 'Units', ''))
plot_xlabel = cf_var_name(field=f, dim=mydim) + ' ('
plot_xlabel += xlabel_units + ')'
y = np.squeeze(f.array)
# y label
if hasattr(f, 'id'):
plot_ylabel = f.id
nc = f.nc_get_variable(False)
if nc:
plot_ylabel = f.nc_get_variable()
if hasattr(f, 'short_name'):
plot_ylabel = f.short_name
if hasattr(f, 'long_name'):
plot_ylabel = f.long_name
if hasattr(f, 'standard_name'):
plot_ylabel = f.standard_name
if hasattr(f, 'Units'):
ylabel_units = str(f.Units)
else:
ylabel_units = ''
plot_ylabel += ' (' + ylabel_units + ')'
if has_count != 1:
errstr = '\n lineplot error - passed field is not suitable '
errstr += 'for plotting as a line\n'
for mydim in list(f.dimension_coordinates()):
sn = getattr(f.construct(mydim), 'standard_name', False)
ln = getattr(f.construct(mydim), 'long_name', False)
if sn:
errstr = errstr + \
str(mydim) + ',' + str(sn) + ',' + \
str(f.construct(mydim).size) + '\n'
else:
if ln:
errstr = errstr + \
str(mydim) + ',' + str(ln) + ',' + \
str(f.construct(mydim).size) + '\n'
raise Warning(errstr)
else:
if verbose:
print('lineplot - not a CF field, using passed data')
errstr = ''
if x is None or y is None:
            errstr = 'lineplot error - must define both x and y'
        if f is not None:
            errstr += 'lineplot error - must define just x and y to make '
            errstr += 'a lineplot'
if errstr != '':
raise Warning('\n' + errstr + '\n')
# Z on y-axis
ztype = None
if xlabel_units in ['mb', 'mbar', 'millibar', 'decibar',
'atmosphere', 'atm', 'pascal', 'Pa', 'hPa']:
ztype = 1
if xlabel_units in ['meter', 'metre', 'm', 'kilometer', 'kilometre', 'km']:
ztype = 2
if cf_field and f.has_construct('Z'):
myz = f.construct('Z')
if len(myz.array) > 1:
zlabel = ''
if hasattr(myz, 'long_name'):
zlabel = myz.long_name
if hasattr(myz, 'standard_name'):
zlabel = myz.standard_name
if zlabel == 'atmosphere_hybrid_height_coordinate':
ztype = 2
if ztype is not None:
x, y = y, x
plot_xlabel, plot_ylabel = plot_ylabel, plot_xlabel
# Set data values
if verbose:
print('lineplot - setting data values')
xpts = np.squeeze(x)
ypts = np.squeeze(y)
minx = np.min(x)
miny = np.min(y)
maxx = np.max(x)
maxy = np.max(y)
# Use accumulated plot limits if making a multiple line plot
if plotvars.graph_xmin is None:
plotvars.graph_xmin = minx
else:
if minx < plotvars.graph_xmin:
plotvars.graph_xmin = minx
if plotvars.graph_xmax is None:
plotvars.graph_xmax = maxx
else:
if maxx > plotvars.graph_xmax:
plotvars.graph_xmax = maxx
if plotvars.graph_ymin is None:
plotvars.graph_ymin = miny
else:
if miny < plotvars.graph_ymin:
plotvars.graph_ymin = miny
if plotvars.graph_ymax is None:
plotvars.graph_ymax = maxy
else:
if maxy > plotvars.graph_ymax:
plotvars.graph_ymax = maxy
# Reset plot limits based on accumulated plot limits
minx = plotvars.graph_xmin
maxx = plotvars.graph_xmax
miny = plotvars.graph_ymin
maxy = plotvars.graph_ymax
if cf_field and f.has_construct('T'):
taxis = f.construct('T')
if ztype == 1:
miny = np.max(y)
maxy = np.min(y)
if ztype == 2:
if cf_field and f.has_construct('Z'):
if f.construct('Z').positive == 'down':
miny = np.max(y)
maxy = np.min(y)
# Use user set values if present
time_xstr = False
time_ystr = False
if plotvars.xmin is not None:
minx = plotvars.xmin
miny = plotvars.ymin
maxx = plotvars.xmax
maxy = plotvars.ymax
# Change from date string to a number if strings are passed
try:
float(minx)
except Exception:
time_xstr = True
try:
float(miny)
except Exception:
time_ystr = True
if cf_field and f.has_construct('T'):
taxis = f.construct('T')
if time_xstr or time_ystr:
ref_time = f.construct('T').units
ref_calendar = f.construct('T').calendar
time_units = cf.Units(ref_time, ref_calendar)
if time_xstr:
t = cf.Data(cf.dt(minx), units=time_units)
minx = t.array
t = cf.Data(cf.dt(maxx), units=time_units)
maxx = t.array
taxis = cf.Data([cf.dt(plotvars.xmin),
cf.dt(plotvars.xmax)], units=time_units)
if time_ystr:
t = cf.Data(cf.dt(miny), units=time_units)
miny = t.array
t = cf.Data(cf.dt(maxy), units=time_units)
maxy = t.array
taxis = cf.Data([cf.dt(plotvars.ymin),
cf.dt(plotvars.ymax)], units=time_units)
# Set x and y labelling
# Retrieve any user defined axis labels
if plot_xlabel == '' and plotvars.xlabel is not None:
plot_xlabel = plotvars.xlabel
if plot_ylabel == '' and plotvars.ylabel is not None:
plot_ylabel = plotvars.ylabel
if xticks is None and plotvars.xticks is not None:
xticks = plotvars.xticks
if plotvars.xticklabels is not None:
xticklabels = plotvars.xticklabels
else:
xticklabels = list(map(str, xticks))
if yticks is None and plotvars.yticks is not None:
yticks = plotvars.yticks
if plotvars.yticklabels is not None:
yticklabels = plotvars.yticklabels
else:
yticklabels = list(map(str, yticks))
mod = False
ymult = 0
if xticks is None:
if plot_xlabel[0:3].lower() == 'lon':
xticks, xticklabels = mapaxis(minx, maxx, type=1)
if plot_xlabel[0:3].lower() == 'lat':
xticks, xticklabels = mapaxis(minx, maxx, type=2)
if cf_field:
if xticks is None:
if f.has_construct('T'):
if np.size(f.construct('T').array) > 1:
xticks, xticklabels, plot_xlabel = timeaxis(taxis)
if xticks is None:
xticks, ymult = gvals(dmin=minx, dmax=maxx, mod=mod)
# Fix long floating point numbers if necessary
fix_floats(xticks)
xticklabels = xticks
else:
if xticklabels is None:
xticklabels = []
for val in xticks:
xticklabels.append('{}'.format(val))
if yticks is None:
if abs(maxy - miny) > 1:
if miny < maxy:
yticks, ymult = gvals(dmin=miny, dmax=maxy, mod=mod)
if maxy < miny:
yticks, ymult = gvals(dmin=maxy, dmax=miny, mod=mod)
else:
yticks, ymult = gvals(dmin=miny, dmax=maxy, mod=mod)
# Fix long floating point numbers if necessary
fix_floats(yticks)
if yticklabels is None:
yticklabels = []
for val in yticks:
yticklabels.append(str(round(val, 9)))
if xlabel is not None:
plot_xlabel = xlabel
if xunits is not None:
        plot_xlabel += ' (' + xunits + ')'
if ylabel is not None:
plot_ylabel = ylabel
if yunits is not None:
        plot_ylabel += ' (' + yunits + ')'
if swap_xy:
if verbose:
print('lineplot - swapping x and y')
xpts, ypts = ypts, xpts
minx, miny = miny, minx
maxx, maxy = maxy, maxx
plot_xlabel, plot_ylabel = plot_ylabel, plot_xlabel
xticks, yticks = yticks, xticks
xticklabels, yticklabels = yticklabels, xticklabels
if plotvars.user_gset == 1:
if time_xstr is False and time_ystr is False:
minx = plotvars.xmin
maxx = plotvars.xmax
miny = plotvars.ymin
maxy = plotvars.ymax
if axes:
if xaxis is not True:
xticks = [100000000]
xticklabels = xticks
plot_xlabel = ''
if yaxis is not True:
yticks = [100000000]
yticklabels = yticks
plot_ylabel = ''
else:
xticks = [100000000]
xticklabels = xticks
yticks = [100000000]
yticklabels = yticks
plot_xlabel = ''
plot_ylabel = ''
# Make graph
if verbose:
print('lineplot - making graph')
xlabelalignment = plotvars.xtick_label_align
ylabelalignment = plotvars.ytick_label_align
if lines is False:
linewidth = 0.0
colorarg = {}
if color is not None:
colorarg = {'color': color}
graph = plotvars.plot
if plotvars.twinx:
graph = graph.twinx()
ylabelalignment = 'left'
if plotvars.twiny:
graph = graph.twiny()
    # Reset y limits if miny == maxy
if plotvars.xmin is None:
if miny == maxy:
miny = miny - 1.0
maxy = maxy + 1.0
graph.axis([minx, maxx, miny, maxy])
graph.tick_params(direction='out', which='both', right=True, top=True)
graph.set_xlabel(plot_xlabel, fontsize=plotvars.axis_label_fontsize,
fontweight=plotvars.axis_label_fontweight)
graph.set_ylabel(plot_ylabel, fontsize=plotvars.axis_label_fontsize,
fontweight=plotvars.axis_label_fontweight)
if plotvars.xlog or xlog:
graph.set_xscale('log')
if plotvars.ylog or ylog:
graph.set_yscale('log')
if xticks is not None:
graph.set_xticks(xticks)
graph.set_xticklabels(xticklabels,
rotation=plotvars.xtick_label_rotation,
horizontalalignment=xlabelalignment,
fontsize=plotvars.axis_label_fontsize,
fontweight=plotvars.axis_label_fontweight)
if yticks is not None:
graph.set_yticks(yticks)
graph.set_yticklabels(yticklabels,
rotation=plotvars.ytick_label_rotation,
horizontalalignment=ylabelalignment,
fontsize=plotvars.axis_label_fontsize,
fontweight=plotvars.axis_label_fontweight)
graph.plot(xpts, ypts, **colorarg, linestyle=linestyle,
linewidth=linewidth, marker=marker,
markersize=markersize,
markeredgecolor=markeredgecolor,
markeredgewidth=markeredgewidth,
label=label, zorder=zorder)
# Set axis width if required
if plotvars.axis_width is not None:
for axis in ['top', 'bottom', 'left', 'right']:
plotvars.plot.spines[axis].set_linewidth(plotvars.axis_width)
# Add a legend if needed
if label is not None:
legend_properties = {
'size': plotvars.legend_text_size,
'weight': plotvars.legend_text_weight}
graph.legend(loc=legend_location, prop=legend_properties,
frameon=plotvars.legend_frame,
edgecolor=plotvars.legend_frame_edge_color,
facecolor=plotvars.legend_frame_face_color)
# Set title
if title is not None:
graph.set_title(title, fontsize=plotvars.title_fontsize,
fontweight=plotvars.title_fontweight)
##################
# Save or view plot
##################
if plotvars.user_plot == 0:
if verbose:
print('Saving or viewing plot')
gclose()
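# Illustrative lineplot sketch, condensed from example 28 in regression_tests
# below (the data file and settings are the ones used there):
#
#     f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
#     g = f.collapse('X: mean')
#     gopen()
#     lineplot(g.subspace(pressure=100), marker='o', color='blue',
#              label='100mb', title='Zonal mean zonal wind')
#     lineplot(g.subspace(pressure=200), marker='D', color='red',
#              label='200mb', legend_location='upper right')
#     gclose()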
def regression_tests():
"""
| Test for cf-plot regressions
| Run through some standard levs, gvals, lon and lat labelling
    | Make all the gallery plots and use ImageMagick to display them
| alongside a reference plot
|
|
|
|
|
"""
print('==================')
print('Regression testing')
print('==================')
print('')
print('------------------')
print('Testing for levels')
print('------------------')
ref_answer = [-35, -30, -25, -20, -15, -10, -5, 0, 5,
10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65]
compare_arrays(ref=ref_answer, levs_test=True, min=-35, max=65, step=5)
ref_answer = [-6., -4.8, -3.6, -2.4, -1.2, 0., 1.2, 2.4, 3.6, 4.8, 6.]
compare_arrays(ref=ref_answer, levs_test=True, min=-6, max=6, step=1.2)
ref_answer = [50000, 51000, 52000, 53000, 54000, 55000, 56000, 57000,
58000, 59000, 60000]
compare_arrays(ref=ref_answer, levs_test=True, min=50000, max=60000, step=1000)
ref_answer = [-7000, -6500, -6000, -5500, -5000, -4500, -4000, -3500,
-3000, -2500, -2000, -1500, -1000, -500]
compare_arrays(
ref=ref_answer,
levs_test=True,
min=-7000,
max=-300,
step=500)
print('')
print('-----------------')
print('Testing for gvals')
print('-----------------')
ref_answer = [281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293]
compare_arrays(ref=ref_answer, min=280.50619506835938,
max=293.48431396484375, mult=0, gvals_test=True)
ref_answer = [0.356, 0.385, 0.414, 0.443, 0.472, 0.501, 0.53, 0.559,
0.588, 0.617, 0.646, 0.675]
compare_arrays(ref=ref_answer, min=0.356, max=0.675, mult=0,
gvals_test=True)
ref_answer = [-45, -40, -35, -30, -25, -20, -15, -10, -5, 0, 5, 10, 15,
20, 25, 30, 35, 40, 45, 50]
compare_arrays(ref=ref_answer, min=-49.510975, max=53.206604, mult=0,
gvals_test=True)
ref_answer = [47000, 48000, 49000, 50000, 51000, 52000, 53000, 54000,
55000, 56000, 57000, 58000, 59000, 60000, 61000, 62000,
63000, 64000]
compare_arrays(ref=ref_answer, min=46956, max=64538, mult=0,
gvals_test=True)
ref_answer = [-1., -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0., 0.1]
compare_arrays(ref=ref_answer, min=-1.0, max=0.1, mult=0,
gvals_test=True)
print('')
print('----------------------------------------')
print('Testing for longitude/latitude labelling')
print('----------------------------------------')
ref_answer = ([-180, -120, -60, 0, 60, 120, 180],
['180', '120W', '60W', '0', '60E', '120E', '180'])
compare_arrays(ref=ref_answer, min=-180, max=180, type=1,
mapaxis_test=True)
ref_answer = ([150, 180, 210, 240, 270],
['150E', '180', '150W', '120W', '90W'])
compare_arrays(ref=ref_answer, min=135, max=280, type=1,
mapaxis_test=True)
ref_answer = ([0, 10, 20, 30, 40, 50, 60, 70, 80, 90], ['0', '10E', '20E',
'30E', '40E', '50E', '60E', '70E', '80E', '90E'])
compare_arrays(ref=ref_answer, min=0, max=90, type=1, mapaxis_test=True)
ref_answer = ([-90, -60, -30, 0, 30, 60, 90],
['90S', '60S', '30S', '0', '30N', '60N', '90N'])
compare_arrays(ref=ref_answer, min=-90, max=90, type=2, mapaxis_test=True)
ref_answer = ([0, 5, 10, 15, 20, 25, 30],
['0', '5N', '10N', '15N', '20N', '25N', '30N'])
compare_arrays(ref=ref_answer, min=0, max=30, type=2, mapaxis_test=True)
print('')
print('-----------------')
print('Testing for plots')
print('-----------------')
# Run through gallery examples and compare to reference plots
# example1
reset()
setvars(file='fig1.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
con(f.subspace(time=15))
compare_images(1)
# example2
reset()
setvars(file='fig2.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
con(f.subspace(time=15), blockfill=True, lines=False)
compare_images(2)
# example3
reset()
setvars(file='fig3.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
mapset(lonmin=-15, lonmax=3, latmin=48, latmax=60)
levs(min=265, max=285, step=1)
con(f.subspace(time=15))
compare_images(3)
# example4
reset()
setvars(file='fig4.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
mapset(proj='npstere')
con(f.subspace(pressure=500))
compare_images(4)
# example5
reset()
setvars(file='fig5.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
mapset(proj='spstere', boundinglat=-30, lon_0=180)
con(f.subspace(pressure=500))
compare_images(5)
# example6
reset()
setvars(file='fig6.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[3]
con(f.subspace(longitude=0))
compare_images(6)
# example7
reset()
setvars(file='fig7.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
con(f.collapse('mean', 'longitude'))
compare_images(7)
# example8
reset()
setvars(file='fig8.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
con(f.collapse('mean', 'longitude'), ylog=1)
compare_images(8)
# example9
reset()
setvars(file='fig9.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[0]
con(f.collapse('mean', 'latitude'))
compare_images(9)
# example10
reset()
setvars(file='fig10.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
cscale('plasma')
con(f.subspace(longitude=0), lines=0)
compare_images(10)
# example11
reset()
setvars(file='fig11.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
gset(-30, 30, '1960-1-1', '1980-1-1')
levs(min=280, max=305, step=1)
cscale('plasma')
con(f.subspace(longitude=0), lines=0)
compare_images(11)
# example12
reset()
setvars(file='fig12.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
cscale('plasma')
con(f.subspace(latitude=0), lines=0)
compare_images(12)
# example13
reset()
setvars(file='fig13.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')
u = f[1].subspace(pressure=500)
v = f[3].subspace(pressure=500)
vect(u=u, v=v, key_length=10, scale=100, stride=5)
compare_images(13)
# example14
reset()
setvars(file='fig14.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')
u = f[1].subspace(pressure=500)
v = f[3].subspace(pressure=500)
t = f[0].subspace(pressure=500)
gopen()
mapset(lonmin=10, lonmax=120, latmin=-30, latmax=30)
levs(min=254, max=270, step=1)
con(t)
vect(u=u, v=v, key_length=10, scale=50, stride=2)
gclose()
compare_images(14)
# example15
reset()
setvars(file='fig15.png')
u = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
v = cf.read('/opt/graphics/cfplot_data/ggap.nc')[3]
u = u.subspace(Z=500)
v = v.subspace(Z=500)
mapset(proj='npstere')
vect(u=u, v=v, key_length=10, scale=100, pts=40,
title='Polar plot with regular point distribution')
compare_images(15)
# example16
reset()
setvars(file='fig16.png')
c = cf.read('/opt/graphics/cfplot_data/vaAMIPlcd_DJF.nc')[0]
c = c.subspace(Y=cf.wi(-60, 60))
c = c.subspace(X=cf.wi(80, 160))
c = c.collapse('T: mean X: mean')
g = cf.read('/opt/graphics/cfplot_data/wapAMIPlcd_DJF.nc')[0]
g = g.subspace(Y=cf.wi(-60, 60))
g = g.subspace(X=cf.wi(80, 160))
g = g.collapse('T: mean X: mean')
vect(u=c, v=-g, key_length=[5, 0.05],
scale=[20, 0.2], title='DJF', key_location=[0.95, -0.05])
compare_images(16)
# example17
reset()
setvars(file='fig17.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
g = f.subspace(time=15)
gopen()
cscale('magma')
con(g)
stipple(f=g, min=220, max=260, size=100, color='#00ff00')
stipple(f=g, min=300, max=330, size=50, color='#0000ff', marker='s')
gclose()
compare_images(17)
# example18
reset()
setvars(file='fig18.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
g = f.subspace(time=15)
gopen()
cscale('magma')
mapset(proj='npstere')
con(g)
stipple(f=g, min=265, max=295, size=100, color='#00ff00')
gclose()
compare_images(18)
# example19
reset()
setvars(file='fig19.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
gopen(rows=2, columns=2, bottom=0.2)
gpos(1)
con(f.subspace(pressure=500), colorbar=None)
gpos(2)
mapset(proj='moll')
con(f.subspace(pressure=500), colorbar=None)
gpos(3)
mapset(proj='npstere', boundinglat=30, lon_0=180)
con(f.subspace(pressure=500), colorbar=None)
gpos(4)
mapset(proj='spstere', boundinglat=-30, lon_0=180)
con(f.subspace(pressure=500), colorbar_position=[
0.1, 0.1, 0.8, 0.02], colorbar_orientation='horizontal')
gclose()
compare_images(19)
# example20
reset()
setvars(file='fig20.png')
f = cf.read('/opt/graphics/cfplot_data/Geostropic_Adjustment.nc')[0]
con(f.subspace[9])
compare_images(20)
# example21
reset()
setvars(file='fig21.png')
f = cf.read('/opt/graphics/cfplot_data/Geostropic_Adjustment.nc')[0]
con(f.subspace[9], title='test data',
xticks=np.arange(5) * 100000 + 100000,
yticks=np.arange(7) * 2000 + 2000,
xlabel='x-axis', ylabel='z-axis')
compare_images(21)
# example22
reset()
setvars(file='fig22.png')
f = cf.read_field('/opt/graphics/cfplot_data/rgp.nc')
cscale('gray')
con(f)
compare_images(22)
# example23
reset()
setvars(file='fig23.png')
f = cf.read_field('/opt/graphics/cfplot_data/rgp.nc')
data = f.array
xvec = f.construct('dim1').array
yvec = f.construct('dim0').array
xpole = 160
ypole = 30
gopen()
cscale('plasma')
xpts = np.arange(np.size(xvec))
ypts = np.arange(np.size(yvec))
gset(xmin=0, xmax=np.size(xvec) - 1, ymin=0, ymax=np.size(yvec) - 1)
levs(min=980, max=1035, step=2.5)
con(data, xpts, ypts[::-1])
rgaxes(xpole=xpole, ypole=ypole, xvec=xvec, yvec=yvec)
gclose()
compare_images(23)
# example24
reset()
setvars(file='fig24.png')
from matplotlib.mlab import griddata
# Arrays for data
lons = []
lats = []
pressure = []
temp = []
# Read data
f = open('/opt/graphics/cfplot_data/synop_data.txt')
lines = f.readlines()
for line in lines:
mysplit = line.split()
lons = np.append(lons, float(mysplit[1]))
lats = np.append(lats, float(mysplit[2]))
pressure = np.append(pressure, float(mysplit[3]))
temp = np.append(temp, float(mysplit[4]))
# Linearly interpolate data to a regular grid
lons_new = np.arange(140) * 0.1 - 11.0
lats_new = np.arange(140) * 0.1 + 49.0
temp_new = griddata(lons, lats, temp, lons_new, lats_new, interp='linear')
cscale('parula')
con(x=lons_new, y=lats_new, f=temp_new, ptype=1)
compare_images(24)
# example25
reset()
setvars(file='fig25.png')
gopen()
con(x=lons_new, y=lats_new, f=temp_new, ptype=1)
for i in np.arange(len(lines)):
plotvars.plot.text(float(lons[i]), float(lats[i]), str(temp[i]),
horizontalalignment='center',
verticalalignment='center')
gclose()
compare_images(25)
# example26
reset()
setvars(file='fig26.png')
from netCDF4 import Dataset as ncfile
from matplotlib.mlab import griddata
# Get an Orca grid and flatten the arrays
nc = ncfile('/opt/graphics/cfplot_data/orca2.nc')
lons = np.array(nc.variables['longitude'])
lats = np.array(nc.variables['latitude'])
temp = np.array(nc.variables['sst'])
lons = lons.flatten()
lats = lats.flatten()
temp = temp.flatten()
# Add wrap around at both longitude limits
pts = np.squeeze(np.where(lons < -150))
lons = np.append(lons, lons[pts] + 360)
lats = np.append(lats, lats[pts])
temp = np.append(temp, temp[pts])
pts = np.squeeze(np.where(lons > 150))
lons = np.append(lons, lons[pts] - 360)
lats = np.append(lats, lats[pts])
temp = np.append(temp, temp[pts])
lons_new = np.arange(181 * 8) * 0.25 - 180.0
lats_new = np.arange(91 * 8) * 0.25 - 90.0
temp_new = griddata(lons, lats, temp, lons_new, lats_new, interp='linear')
con(x=lons_new, y=lats_new, f=temp_new, ptype=1)
compare_images(26)
# example27
reset()
setvars(file='fig27.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
g = f.collapse('X: mean')
lineplot(g.subspace(pressure=100), marker='o', color='blue',
title='Zonal mean zonal wind at 100mb')
compare_images(27)
# example28
reset()
setvars(file='fig28.png')
f = cf.read('/opt/graphics/cfplot_data/ggap.nc')[1]
g = f.collapse('X: mean')
xticks = [-90, -75, -60, -45, -30, -15, 0, 15, 30, 45, 60, 75, 90]
xticklabels = ['90S', '75S', '60S', '45S', '30S', '15S', '0', '15N',
'30N', '45N', '60N', '75N', '90N']
xpts = [-30, 30, 30, -30, -30]
ypts = [-8, -8, 5, 5, -8]
gset(xmin=-90, xmax=90, ymin=-10, ymax=50)
gopen()
lineplot(g.subspace(pressure=100), marker='o', color='blue',
title='Zonal mean zonal wind', label='100mb')
lineplot(g.subspace(pressure=200), marker='D', color='red',
label='200mb', xticks=xticks, xticklabels=xticklabels,
legend_location='upper right')
plotvars.plot.plot(xpts, ypts, linewidth=3.0, color='green')
plotvars.plot.text(35, -2, 'Region of interest',
horizontalalignment='left')
gclose()
compare_images(28)
# example29
reset()
setvars(file='fig29.png')
f = cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0]
temp = f.subspace(time=cf.wi(cf.dt('1900-01-01'), cf.dt('1980-01-01')))
temp_annual = temp.collapse('T: mean', group=cf.Y())
temp_annual_global = temp_annual.collapse('area: mean', weights='area')
temp_annual_global.Units -= 273.15
lineplot(
temp_annual_global,
title='Global average annual temperature',
color='blue')
compare_images(29)
def compare_images(example=None):
"""
    | Compare images and report a failure, showing the differences, if they don't match
|
|
|
|
|
|
|
"""
import hashlib
disp = which('display')
conv = which('convert')
comp = which('compare')
file = 'fig' + str(example) + '.png'
file_new = '/home/andy/cfplot.src/cfplot/' + file
file_ref = '/home/andy/regression/' + file
# Check md5 checksums are the same and display files if not
if hashlib.md5(open(file_new, 'rb').read()).hexdigest() != hashlib.md5(
open(file_ref, 'rb').read()).hexdigest():
print('***Failed example ' + str(example) + '**')
error_image = '/home/andy/cfplot.src/cfplot/' + 'error_' + file
diff_image = '/home/andy/cfplot.src/cfplot/' + 'difference_' + file
p = subprocess.Popen([comp, file_new, file_ref, diff_image])
(output, err) = p.communicate()
p.wait()
p = subprocess.Popen([conv, "+append", file_new,
file_ref, error_image])
(output, err) = p.communicate()
p.wait()
subprocess.Popen([disp, diff_image])
else:
print('Passed example ' + str(example))
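# compare_images depends on the ImageMagick display/convert/compare command
# line tools and on the hard-coded output and reference directories above, so
# it is only meaningful on the maintainer's machine.  Hedged sketch, following
# example 1 of regression_tests:
#
#     setvars(file='fig1.png')
#     con(cf.read('/opt/graphics/cfplot_data/tas_A1.nc')[0].subspace(time=15))
#     compare_images(1)    # prints pass/fail against the stored reference png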
def compare_arrays(ref=None, levs_test=None, gvals_test=None,
mapaxis_test=None, min=None, max=None, step=None,
mult=None, type=None):
"""
    | Compare arrays and print a failure report if they don't match
|
|
|
|
|
|
|
"""
anom = 0
if levs_test:
levs(min, max, step)
if np.size(ref) != np.size(plotvars.levels):
anom = 1
else:
for val in np.arange(np.size(ref)):
if abs(ref[val] - plotvars.levels[val]) >= 1e-6:
anom = 1
if anom == 1:
print('***levs failure***')
print('min, max, step are', min, max, step)
print('generated levels are:')
print(plotvars.levels)
print('expected levels:')
print(ref)
else:
pass_str = 'Passed cfp.levs(min=' + str(min) + ', max='
pass_str += str(max) + ', step=' + str(step) + ')'
print(pass_str)
anom = 0
if gvals_test:
vals, testmult = gvals(min, max)
if np.size(ref) != np.size(vals):
anom = 1
else:
for val in np.arange(np.size(ref)):
if abs(ref[val] - vals[val]) >= 1e-6:
anom = 1
if mult != testmult:
anom = 1
if anom == 1:
print('***gvals failure***')
print('cfp.gvals(' + str(min) + ', ' + str(max) + ')')
print('')
print('generated values are:', vals)
print('with a multiplier of ', testmult)
print('')
print('expected values:', ref)
print('with a multiplier of ', mult)
else:
pass_str = 'Passed cfp.gvals(' + str(min) + ', ' + str(max) + ')'
print(pass_str)
anom = 0
if mapaxis_test:
ref_ticks = ref[0]
ref_labels = ref[1]
test_ticks, test_labels = mapaxis(min=min, max=max, type=type)
if np.size(test_ticks) != np.size(ref_ticks):
anom = 1
else:
for val in np.arange(np.size(ref_ticks)):
if abs(ref_ticks[val] - test_ticks[val]) >= 1e-6:
anom = 1
if ref_labels[val] != test_labels[val]:
anom = 1
if anom == 1:
print('***mapaxis failure***')
print('')
print('cfp.mapaxis(min=' + str(min) + ', max=' + str(max))
print(', type=' + str(type) + ')')
print('generated values are:', test_ticks)
print('with labels:', test_labels)
print('')
print('expected ticks:', ref_ticks)
print('with labels:', ref_labels)
else:
pass_str = 'Passed cfp.mapaxis(min=' + str(min) + ', max='
pass_str += str(max) + ', type=' + str(type) + ')'
print(pass_str)
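# Illustrative compare_arrays sketch - the reference values are copied from
# the first levels test in regression_tests above:
#
#     ref = [-35, -30, -25, -20, -15, -10, -5, 0, 5, 10,
#            15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65]
#     compare_arrays(ref=ref, levs_test=True, min=-35, max=65, step=5)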
def traj(f=None, title=None, ptype=0, linestyle='-', linewidth=1.0, linecolor='b',
marker='o', markevery=1, markersize=5.0, markerfacecolor='r',
markeredgecolor='g', markeredgewidth=1.0, latmax=None, latmin=None,
axes=True, xaxis=True, yaxis=True,
verbose=None, legend=False, legend_lines=False,
xlabel=None, ylabel=None, xticks=None, yticks=None,
xticklabels=None, yticklabels=None, colorbar=None,
colorbar_position=None, colorbar_orientation='horizontal',
colorbar_title=None, colorbar_text_up_down=False,
colorbar_text_down_up=False, colorbar_drawedges=True,
colorbar_fraction=None, colorbar_thick=None,
colorbar_anchor=None, colorbar_shrink=None,
colorbar_labels=None,
vector=False, head_width=0.4, head_length=1.0,
fc='k', ec='k', zorder=None):
"""
| traj is the interface to trajectory plotting in cf-plot.
| The minimum use is traj(f) where f is a CF field.
|
| f - CF data used to make a line plot
| linestyle='-' - line style
| linecolor='b' - line colour
| linewidth=1.0 - line width
| marker='o' - marker for points along the line
    | markersize=5.0 - size of the marker
    | markerfacecolor='r' - colour of the marker face
| markeredgecolor='g' - colour of the marker edge
| legend=False - plot different colour markers based on a set of user levels
| zorder=None - order for plotting
| verbose=None - Set to True to get a verbose listing of what traj is doing
|
| The following parameters override any CF data defaults:
| title=None - plot title
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
| xlabel=None - x name
| ylabel=None - y name
| xticks=None - x ticks
| xticklabels=None - x tick labels
| yticks=None - y ticks
| yticklabels=None - y tick labels
| colorbar=None - plot a colorbar
| colorbar_position=None - position of colorbar
| [xmin, ymin, x_extent,y_extent] in normalised
| coordinates. Use when a common colorbar
| is required for a set of plots. A typical set
| of values would be [0.1, 0.05, 0.8, 0.02]
| colorbar_orientation=None - orientation of the colorbar
| colorbar_title=None - title for the colorbar
| colorbar_text_up_down=False - if True horizontal colour bar labels alternate
| above (start) and below the colour bar
| colorbar_text_down_up=False - if True horizontal colour bar labels alternate
| below (start) and above the colour bar
| colorbar_drawedges=True - draw internal divisions in the colorbar
| colorbar_fraction=None - space for the colorbar - default = 0.21, in normalised
| coordinates
| colorbar_thick=None - thickness of the colorbar - default = 0.015, in normalised
| coordinates
| colorbar_anchor=None - default=0.5 - anchor point of colorbar within the fraction space.
| 0.0 = close to plot, 1.0 = further away
| colorbar_shrink=None - value to shrink the colorbar by. If the colorbar
| exceeds the plot area then values of 1.0, 0.55
    | or 0.5 may help it better fit the plot area.
| colorbar_labels=None - labels for the colorbar. Default is to use the levels defined
| using cfp.levs
| Vector options
| vector=False - Draw vectors
    | head_width=0.4 - vector head width
    | head_length=1.0 - vector head length
| fc='k' - vector face colour
| ec='k' - vector edge colour
"""
if verbose:
print('traj - making a trajectory plot')
if isinstance(f, cf.FieldList):
errstr = "\n\ncfp.traj - cannot make a trajectory plot from a field list "
errstr += "- need to pass a field\n\n"
raise TypeError(errstr)
# Read in data
# Find the auxiliary lons and lats if provided
has_lons = False
has_lats = False
for mydim in list(f.auxiliary_coordinates()):
name = cf_var_name(field=f, dim=mydim)
if name in ['longitude']:
lons = np.squeeze(f.construct(mydim).array)
has_lons = True
if name in ['latitude']:
lats = np.squeeze(f.construct(mydim).array)
has_lats = True
data = f.array
# Raise an error if lons and lats not found in the input data
if not has_lons or not has_lats:
message = '\n\n\ntraj error\n'
if not has_lons:
message += 'missing longitudes in the field auxiliary data\n'
if not has_lats:
message += 'missing latitudes in the field auxiliary data\n'
message += '\n\n\n'
raise TypeError(message)
if latmax is not None:
pts = np.where(lats >= latmax)
if np.size(pts) > 0:
lons[pts] = np.nan
lats[pts] = np.nan
if latmin is not None:
pts = np.where(lats <= latmin)
if np.size(pts) > 0:
lons[pts] = np.nan
lats[pts] = np.nan
# Set potential user axis labels
    user_xlabel = ''
    user_ylabel = ''
    if xlabel is not None:
        user_xlabel = xlabel
    if ylabel is not None:
        user_ylabel = ylabel
# Set plotting parameters
continent_thickness = 1.5
continent_color = 'k'
continent_linestyle = '-'
if plotvars.continent_thickness is not None:
continent_thickness = plotvars.continent_thickness
if plotvars.continent_color is not None:
continent_color = plotvars.continent_color
if plotvars.continent_linestyle is not None:
continent_linestyle = plotvars.continent_linestyle
land_color = plotvars.land_color
ocean_color = plotvars.ocean_color
lake_color = plotvars.lake_color
##################
    # Open a new plot if necessary
##################
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Set up mapping
if plotvars.user_mapset == 0:
plotvars.lonmin = -180
plotvars.lonmax = 180
plotvars.latmin = -90
plotvars.latmax = 90
set_map()
mymap = plotvars.mymap
# Set the plot limits
gset(xmin=plotvars.lonmin, xmax=plotvars.lonmax,
ymin=plotvars.latmin, ymax=plotvars.latmax, user_gset=0)
# Make lons and lats 2d if they are 1d
ndim = np.ndim(lons)
if ndim == 1:
lons = lons.reshape(1, -1)
lats = lats.reshape(1, -1)
ntracks = np.shape(lons)[0]
if ndim == 1:
ntracks = 1
if legend or legend_lines:
# Check levels are not None
levs = plotvars.levels
if plotvars.levels is not None:
if verbose:
print('traj - plotting different colour markers based on a user set of levels')
levs = plotvars.levels
else:
# Automatic levels
if verbose:
print('traj - generating automatic legend levels')
dmin = np.nanmin(data)
dmax = np.nanmax(data)
levs, mult = gvals(dmin=dmin, dmax=dmax, mod=False)
# Add extend options to the levels if set
if plotvars.levels_extend == 'min' or plotvars.levels_extend == 'both':
levs = np.append(-1e-30, levs)
if plotvars.levels_extend == 'max' or plotvars.levels_extend == 'both':
levs = np.append(levs, 1e30)
# Set the default colour scale
if plotvars.cscale_flag == 0:
cscale('viridis', ncols=np.size(levs) + 1)
plotvars.cscale_flag = 0
# User selected colour map but no mods so fit to levels
if plotvars.cscale_flag == 1:
cscale(plotvars.cs_user, ncols=np.size(levs) + 1)
plotvars.cscale_flag = 1
##################################
# Line, symbol and vector plotting
##################################
for track in np.arange(ntracks):
xpts = lons[track, :]
ypts = lats[track, :]
data2 = data[track, :]
xpts_orig = deepcopy(xpts)
xpts = np.mod(xpts + 180, 360) - 180
        # If the remapped longitudes still reach the dateline region, revert to
        # the original longitudes and remove any 360 degree jumps instead
if np.min(xpts) < -170 or np.max(xpts) > 170:
xpts = xpts_orig
for ix in np.arange(np.size(xpts)-1):
diff = xpts[ix+1] - xpts[ix]
if diff >= 60:
xpts[ix+1] = xpts[ix+1] - 360.0
if diff <= -60:
xpts[ix+1] = xpts[ix+1] + 360.0
# Plot lines and markers
plot_linewidth = linewidth
plot_markersize = markersize
if legend:
plot_markersize = 0.0
if plot_linewidth > 0.0 or plot_markersize > 0.0:
if verbose and track == 0 and linewidth > 0.0:
print('plotting lines')
if verbose and track == 0 and markersize > 0.0:
print('plotting markers')
if legend_lines is False:
mymap.plot(xpts, ypts, color=linecolor,
linewidth=plot_linewidth, linestyle=linestyle,
marker=marker, markevery=markevery, markersize=plot_markersize,
markerfacecolor=markerfacecolor, markeredgecolor=markeredgecolor,
markeredgewidth=markeredgewidth,
zorder=zorder, clip_on=True, transform=ccrs.PlateCarree())
else:
line_xpts = xpts.compressed()
line_ypts = ypts.compressed()
line_data = data2.compressed()
for i in np.arange(np.size(line_xpts)-1):
val = (line_data[i] + line_data[i+1])/2.0
col = plotvars.cs[np.max(np.where(val > plotvars.levels))]
mymap.plot(line_xpts[i:i+2], line_ypts[i:i+2], color=col,
linewidth=plot_linewidth, linestyle=linestyle,
zorder=zorder, clip_on=True, transform=ccrs.PlateCarree())
# Plot vectors
if vector:
if verbose and track == 0:
print('plotting vectors')
if zorder is None:
plot_zorder = 101
else:
plot_zorder = zorder
if plotvars.proj == 'cyl':
if isinstance(xpts, np.ma.MaskedArray):
pts = np.ma.MaskedArray.count(xpts)
else:
pts = xpts.size
for pt in np.arange(pts-1):
mymap.arrow(xpts[pt], ypts[pt],
xpts[pt+1] - xpts[pt],
ypts[pt+1] - ypts[pt],
head_width=head_width,
head_length=head_length,
fc=fc, ec=ec,
length_includes_head=True,
zorder=plot_zorder, clip_on=True,
transform=ccrs.PlateCarree())
# Plot different colour markers based on a user set of levels
if legend:
# For polar stereographic plots mask any points outside the plotting limb
if plotvars.proj == 'npstere':
pts = np.where(lats < plotvars.boundinglat)
if np.size(pts) > 0:
lats[pts] = np.nan
if plotvars.proj == 'spstere':
pts = np.where(lats > plotvars.boundinglat)
if np.size(pts) > 0:
lats[pts] = np.nan
for track in np.arange(ntracks):
xpts = lons[track, :]
ypts = lats[track, :]
data2 = data[track, :]
for i in np.arange(np.size(levs)-1):
color = plotvars.cs[i]
if np.ma.is_masked(data2):
pts = np.ma.where(np.logical_and(data2 >= levs[i], data2 <= levs[i+1]))
else:
pts = np.where(np.logical_and(data2 >= levs[i], data2 <= levs[i+1]))
if zorder is None:
plot_zorder = 101
else:
plot_zorder = zorder
if np.size(pts) > 0:
mymap.scatter(xpts[pts], ypts[pts],
s=markersize*15,
c=color,
marker=marker,
edgecolors=markeredgecolor,
transform=ccrs.PlateCarree(), zorder=plot_zorder)
# Axes
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=verbose)
# Coastlines
feature = cfeature.NaturalEarthFeature(name='land', category='physical',
scale=plotvars.resolution,
facecolor='none')
mymap.add_feature(feature, edgecolor=continent_color,
linewidth=continent_thickness,
linestyle=continent_linestyle)
if ocean_color is not None:
mymap.add_feature(cfeature.OCEAN, edgecolor='face', facecolor=ocean_color,
zorder=100)
if land_color is not None:
mymap.add_feature(cfeature.LAND, edgecolor='face', facecolor=land_color,
zorder=100)
if lake_color is not None:
mymap.add_feature(cfeature.LAKES, edgecolor='face', facecolor=lake_color,
zorder=100)
# Title
if title is not None:
map_title(title)
# Color bar
plot_colorbar = False
if colorbar is None and legend:
plot_colorbar = True
if colorbar is None and legend_lines:
plot_colorbar = True
if colorbar:
plot_colorbar = True
if plot_colorbar:
if (colorbar_title is None):
colorbar_title = 'No Name'
if hasattr(f, 'id'):
colorbar_title = f.id
nc = f.nc_get_variable(False)
if nc:
colorbar_title = f.nc_get_variable()
if hasattr(f, 'short_name'):
colorbar_title = f.short_name
if hasattr(f, 'long_name'):
colorbar_title = f.long_name
if hasattr(f, 'standard_name'):
colorbar_title = f.standard_name
if hasattr(f, 'Units'):
if str(f.Units) == '':
colorbar_title += ''
else:
colorbar_title += '(' + supscr(str(f.Units)) + ')'
levs = plotvars.levels
if colorbar_labels is not None:
levs = colorbar_labels
cbar(levs=levs, labels=levs,
orientation=colorbar_orientation,
position=colorbar_position,
text_up_down=colorbar_text_up_down,
text_down_up=colorbar_text_down_up,
drawedges=colorbar_drawedges,
fraction=colorbar_fraction,
thick=colorbar_thick,
shrink=colorbar_shrink,
anchor=colorbar_anchor,
title=colorbar_title,
verbose=verbose)
##########
# Save plot
##########
if plotvars.user_plot == 0:
gclose()
def cbar(labels=None,
orientation=None,
position=None,
shrink=None,
fraction=None,
title=None,
fontsize=None,
fontweight=None,
text_up_down=None,
text_down_up=None,
drawedges=None,
levs=None,
thick=None,
anchor=None,
extend=None,
mid=None,
verbose=None):
"""
| cbar is the cf-plot interface to the Matplotlib colorbar routine
|
| labels - colorbar labels
| orientation - orientation 'horizontal' or 'vertical'
| position - user specified colorbar position in normalised
| plot coordinates [left, bottom, width, height]
| shrink - default=1.0 - scale colorbar along length
| fraction - default = 0.21 - space for the colorbar in
| normalised plot coordinates
| title - title for the colorbar
| fontsize - font size for the colorbar text
| fontweight - font weight for the colorbar text
| text_up_down - label division text up and down starting with up
| text_down_up - label division text down and up starting with down
    | drawedges - Draw internal delimiter lines in colorbar
| levs - colorbar levels
| thick - set height of colorbar - default = 0.015,
| in normalised plot coordinates
| anchor - default=0.3 - anchor point of colorbar within the fraction space.
| 0.0 = close to plot, 1.0 = further away
| extend = None - extensions for colorbar. The default is for extensions at
| both ends.
| mid = False - label mid points of colours rather than the boundaries
| verbose = None
|
|
|
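    | Example (illustrative sketch; the level and title values are arbitrary):
    |   cbar(levs=[0, 5, 10, 15, 20], orientation='horizontal',
    |        title='Temperature (K)')
    |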
"""
if verbose:
print('con - adding a colour bar')
if levs is None:
if plotvars.levels is not None:
levs = np.array(plotvars.levels)
else:
if labels is None:
errstr = "\n\ncbar error - No levels or labels supplied \n\n"
raise TypeError(errstr)
else:
levs = np.arange(len(labels))
if fontsize is None:
fontsize = plotvars.colorbar_fontsize
if fontweight is None:
fontweight = plotvars.colorbar_fontweight
if thick is None:
thick = 0.012
if plotvars.rows == 2:
thick = 0.008
if plotvars.rows == 3:
thick = 0.005
if plotvars.rows >= 4:
thick = 0.003
if drawedges is None:
drawedges = True
if orientation is None:
orientation = 'horizontal'
if fraction is None:
fraction = 0.12
if plotvars.rows == 2:
fraction = 0.08
if plotvars.rows == 3:
fraction = 0.06
if plotvars.rows >= 4:
fraction = 0.04
if shrink is None:
shrink = 1.0
if anchor is None:
anchor = 0.3
if plotvars.plot_type > 1:
anchor = 0.5
if labels is None:
labels = levs
# Work out colour bar labeling
lbot = levs
if text_up_down:
lbot = levs[1:][::2]
ltop = levs[::2]
if text_down_up:
lbot = levs[::2]
ltop = levs[1:][::2]
# Get the colour map
colmap = cscale_get_map()
cmap = matplotlib.colors.ListedColormap(colmap)
if extend is None:
extend = plotvars.levels_extend
ncolors = np.size(levs)
if extend == 'both' or extend == 'max':
ncolors = ncolors - 1
plotvars.norm = matplotlib.colors.BoundaryNorm(
boundaries=levs, ncolors=ncolors)
# Change boundaries to floats
boundaries = levs.astype(float)
    # Add colorbar extensions if requested by the extend setting. Using
    # boundaries[0]-1 for the lower bound and boundaries[-1]+1 for the upper
    # is just for the colorbar and has no meaning for the plot.
if (extend == 'min' or extend == 'both'):
cmap.set_under(plotvars.cs[0])
boundaries = np.insert(boundaries, 0, boundaries[0]-1)
if (extend == 'max' or extend == 'both'):
cmap.set_over(plotvars.cs[-1])
boundaries = np.insert(boundaries, len(boundaries), boundaries[-1]+1)
if position is None:
# Work out whether the plot is a map plot or normal plot
if (plotvars.plot_type == 1 or plotvars.plot_type == 6):
this_plot = plotvars.mymap
else:
this_plot = plotvars.plot
if plotvars.plot_type == 6 and plotvars.proj == 'rotated':
this_plot = plotvars.plot
l, b, w, h = this_plot.get_position().bounds
if orientation == 'horizontal':
if plotvars.plot_type > 1 or plotvars.plot == 0 or plotvars.proj != 'cyl':
this_plot.set_position([l, b + fraction, w, h - fraction])
if plotvars.plot_type == 1 and plotvars.proj == 'cyl':
# Move plot up if aspect ratio is < 1.1
lonrange = plotvars.lonmax - plotvars.lonmin
latrange = plotvars.latmax - plotvars.latmin
if (lonrange / latrange) < 1.1:
this_plot.set_position([l, b + fraction, w, h - fraction])
l, b, w, h = this_plot.get_position().bounds
ax1 = plotvars.master_plot.add_axes([l + w * (1.0 - shrink)/2.0,
b - fraction * (1.0 - anchor),
w * shrink,
thick])
else:
ax1 = plotvars.master_plot.add_axes([l + w * (1.0 - shrink)/2.0,
b,
w * shrink,
thick])
if plotvars.plot_type > 1 or plotvars.plot_type == 0:
this_plot.set_position([l, b + fraction, w, h - fraction])
else:
ax1 = plotvars.master_plot.add_axes([l + w + fraction * (anchor - 1),
b + h * (1.0 - shrink) / 2.0,
thick,
h * shrink])
this_plot.set_position([l, b, w - fraction, h])
if mid is not None:
lbot_new = []
for i in np.arange(len(labels)):
mid_point = (lbot[i+1]-lbot[i])/2.0+lbot[i]
lbot_new.append(mid_point)
lbot = lbot_new
colorbar = matplotlib.colorbar.ColorbarBase(ax1, cmap=cmap,
norm=plotvars.norm,
extend=extend,
extendfrac='auto',
boundaries=boundaries,
ticks=lbot,
spacing='uniform',
orientation=orientation,
drawedges=drawedges)
else:
if mid is not None:
lbot_new = []
for i in np.arange(len(labels)):
mid_point = (lbot[i+1]-lbot[i])/2.0+lbot[i]
lbot_new.append(mid_point)
lbot = lbot_new
ax1 = plotvars.master_plot.add_axes(position)
colorbar = matplotlib.colorbar.ColorbarBase(
ax1, cmap=cmap,
norm=plotvars.norm,
extend=extend,
extendfrac='auto',
boundaries=boundaries,
ticks=lbot,
spacing='uniform',
orientation=orientation,
drawedges=drawedges)
colorbar.set_label(title, fontsize=fontsize,
fontweight=fontweight)
    # Work around a Matplotlib colorbar labelling issue:
    # with clevs=[-1, 1, 10000, 20000, 30000, 40000, 50000, 60000]
    # the labels are drawn as [0, 2, 10001, 20001, 30001, 40001, 50001, 60001]
    # with a '+1' offset shown near the colorbar, so set the tick labels explicitly
colorbar.set_ticklabels([str(i) for i in labels])
if orientation == 'horizontal':
for tick in colorbar.ax.xaxis.get_ticklines():
tick.set_visible(False)
for t in colorbar.ax.get_xticklabels():
t.set_fontsize(fontsize)
t.set_fontweight(fontweight)
else:
for tick in colorbar.ax.yaxis.get_ticklines():
tick.set_visible(False)
for t in colorbar.ax.get_yticklabels():
t.set_fontsize(fontsize)
t.set_fontweight(fontweight)
# Alternate text top and bottom on a horizontal colorbar if requested
# Use method described at:
# https://stackoverflow.com/questions/37161022/matplotlib-colorbar-
# alternating-top-bottom-labels
if text_up_down or text_down_up:
vmin = colorbar.norm.vmin
vmax = colorbar.norm.vmax
if colorbar.extend == 'min':
shift_l = 0.05
scaling = 0.95
elif colorbar.extend == 'max':
shift_l = 0.
scaling = 0.95
elif colorbar.extend == 'both':
shift_l = 0.05
scaling = 0.9
else:
shift_l = 0.
scaling = 1.0
# Print bottom tick labels
colorbar.ax.set_xticklabels(lbot)
# Print top tick labels
for ii in ltop:
colorbar.ax.text(shift_l + scaling*(ii-vmin)/(vmax-vmin),
1.5, str(ii), transform=colorbar.ax.transAxes,
va='bottom', ha='center', fontsize=fontsize,
fontweight=fontweight)
for t in colorbar.ax.get_xticklabels():
t.set_fontsize(fontsize)
t.set_fontweight(fontweight)
def map_title(title=None, dims=False):
"""
| map_title is an internal routine to draw a title on a map plot
|
| title=None - title to put on map plot
    | dims=False - draw a set of dimension titles
|
|
|
|
|
"""
boundinglat = plotvars.boundinglat
lon_0 = plotvars.lon_0
lonmin = plotvars.lonmin
lonmax = plotvars.lonmax
latmin = plotvars.latmin
latmax = plotvars.latmax
polar_range = 90-abs(boundinglat)
if plotvars.proj == 'cyl':
lon_mid = lonmin + (lonmax - lonmin) / 2.0
mylon = lon_mid
if dims:
mylon = lonmin
proj = ccrs.PlateCarree(central_longitude=lon_mid)
mylat = latmax
xpt, ypt = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
ypt = ypt + (latmax - latmin) / 40.0
if plotvars.proj == 'npstere':
mylon = lon_0 + 180
mylat = boundinglat-polar_range/15.0
proj = ccrs.NorthPolarStereo(central_longitude=lon_0)
xpt, ypt = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
if dims:
mylon = lon_0 + 180
mylat = boundinglat-polar_range/15.0
xpt_mid, ypt = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
mylon = lon_0 - 90
xpt, ypt_mid = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
if plotvars.proj == 'spstere':
mylon = lon_0
mylat = boundinglat+polar_range/15.0
proj = ccrs.SouthPolarStereo(central_longitude=lon_0)
if dims:
mylon = lon_0 + 0
#mylat = boundinglat-polar_range/15.0
mylat = boundinglat-polar_range/15.0
xpt_mid, ypt = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
mylon = lon_0 - 90
xpt, ypt_mid = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
if plotvars.proj == 'lcc':
mylon = lonmin + (lonmax - lonmin) / 2.0
if dims:
mylon = lonmin
lat_0 = 40
if latmin <= 0 and latmax <= 0:
lat_0 = 40
proj = ccrs.LambertConformal(central_longitude=plotvars.lon_0,
central_latitude=lat_0,
cutoff=plotvars.latmin)
mylat = latmax
xpt, ypt = proj.transform_point(mylon, mylat, ccrs.PlateCarree())
fontsize = plotvars.title_fontsize
if dims:
halign = 'left'
fontsize = plotvars.axis_label_fontsize
# Get plot position
this_plot = plotvars.plot
l, b, w, h = this_plot.get_position().bounds
print('initial l, b, w, h are ', l, b, w, h)
# Shift to left
#if plotvars.plot_type == 1 and plotvars.proj !=cyl:
l = l - 0.1
this_plot.set_position([l, b, w, h])
l, b, w, h = this_plot.get_position().bounds
print('changed l, b, w, h are ', l, b, w, h)
plotvars.plot.text(l + w , b + h, title, va='bottom',
ha=halign,
rotation='horizontal', rotation_mode='anchor',
fontsize=fontsize,
fontweight=plotvars.title_fontweight)
else:
halign = 'center'
plotvars.mymap.text(xpt, ypt, title, va='bottom',
ha=halign,
rotation='horizontal', rotation_mode='anchor',
fontsize=fontsize,
fontweight=plotvars.title_fontweight)
def dim_titles(title=None, dims=False):
"""
| dim_titles is an internal routine to draw a set of dimension titles on a plot
|
| title=None - title to put on map plot
    | dims=False - draw a set of dimension titles
|
|
|
|
|
"""
# Get plot position
if plotvars.plot_type == 1:
this_plot = plotvars.mymap
else:
this_plot = plotvars.plot
l, b, w, h = this_plot.get_position().bounds
valign = 'bottom'
# Shift down if a cylindrical plot else to the left
if plotvars.plot_type == 1 and plotvars.proj != 'cyl':
l = l - 0.1
myx = 1.2
myy = 1.0
valign = 'top'
elif plotvars.plot_type == 1 and plotvars.proj == 'cyl':
lonrange = plotvars.lonmax - plotvars.lonmin
latrange = plotvars.latmax - plotvars.latmin
if (lonrange / latrange) > 1.1:
myx = 0.0
myy = 1.02
else:
l = l - 0.1
myx = 1.1
myy = 1.0
valign = 'top'
else:
h = h - 0.1
myx = 0.0
myy = 1.02
this_plot.set_position([l, b, w, h])
this_plot.text(myx, myy, title, va=valign,
ha='left',
fontsize=plotvars.axis_label_fontsize,
fontweight=plotvars.axis_label_fontweight,
transform=this_plot.transAxes)
def plot_map_axes(axes=None, xaxis=None, yaxis=None,
xticks=None, xticklabels=None,
yticks=None, yticklabels=None,
user_xlabel=None, user_ylabel=None,
verbose=None):
"""
| plot_map_axes is an internal routine to draw the axes on a map plot
|
| axes=None - drawing axes
| xaxis=None - drawing x-axis
    | yaxis=None - drawing y-axis
| xticks=None - user defined xticks
| xticklabels=None - user defined xtick labels
| yticks=None - user defined yticks
| yticklabels=None - user defined ytick labels
| user_xlabel=None - user defined xlabel
| user_ylabel=None - user defined ylabel
| verbose=None
|
|
|
|
|
"""
# Font definitions
axis_label_fontsize = plotvars.axis_label_fontsize
axis_label_fontweight = plotvars.axis_label_fontweight
# Map parameters
boundinglat = plotvars.boundinglat
lon_0 = plotvars.lon_0
lonmin = plotvars.lonmin
lonmax = plotvars.lonmax
latmin = plotvars.latmin
latmax = plotvars.latmax
# Cylindrical
if plotvars.proj == 'cyl':
if verbose:
print('con - adding cylindrical axes')
lonticks, lonlabels = mapaxis(
min=plotvars.lonmin, max=plotvars.lonmax, type=1)
latticks, latlabels = mapaxis(
min=plotvars.latmin, max=plotvars.latmax, type=2)
if axes:
if xaxis:
if xticks is None:
axes_plot(xticks=lonticks, xticklabels=lonlabels)
else:
if xticklabels is None:
axes_plot(xticks=xticks, xticklabels=xticks)
else:
axes_plot(xticks=xticks, xticklabels=xticklabels)
if yaxis:
if yticks is None:
axes_plot(yticks=latticks, yticklabels=latlabels)
else:
if yticklabels is None:
axes_plot(yticks=yticks, yticklabels=yticks)
else:
axes_plot(yticks=yticks, yticklabels=yticklabels)
if user_xlabel is not None:
plot.text(0.5, -0.10, user_xlabel, va='bottom',
ha='center',
rotation='horizontal', rotation_mode='anchor',
transform=plotvars.mymap.transAxes,
fontsize=axis_label_fontsize,
fontweight=axis_label_fontweight)
if user_ylabel is not None:
plot.text(-0.05, 0.50, user_ylabel, va='bottom',
ha='center',
rotation='vertical', rotation_mode='anchor',
transform=plotvars.mymap.transAxes,
fontsize=axis_label_fontsize,
fontweight=axis_label_fontweight)
# Polar stereographic
if plotvars.proj == 'npstere' or plotvars.proj == 'spstere':
if verbose:
print('con - adding stereographic axes')
mymap = plotvars.mymap
latrange = 90-abs(boundinglat)
proj = ccrs.Geodetic()
        # Add latitude and longitude grid lines if requested
if axes:
if xaxis:
if yticks is None:
latvals = np.arange(5)*30-60
else:
latvals = np.array(yticks)
if plotvars.proj == 'npstere':
latvals = latvals[np.where(latvals >= boundinglat)]
else:
latvals = latvals[np.where(latvals <= boundinglat)]
for lat in latvals:
if abs(lat - boundinglat) > 1:
lons = np.arange(361)
lats = np.zeros(361)+lat
mymap.plot(lons, lats, color=plotvars.grid_colour,
linewidth=plotvars.grid_thickness,
linestyle=plotvars.grid_linestyle,
transform=proj)
if yaxis:
if xticks is None:
lonvals = np.arange(7)*60
else:
lonvals = xticks
for lon in lonvals:
label = mapaxis(lon, lon, 1)[1][0]
if plotvars.proj == 'npstere':
lats = np.arange(90-boundinglat)+boundinglat
else:
lats = np.arange(boundinglat+91)-90
lons = np.zeros(np.size(lats))+lon
mymap.plot(lons, lats, color=plotvars.grid_colour,
linewidth=plotvars.grid_thickness,
linestyle=plotvars.grid_linestyle,
transform=proj)
# Add longitude labels
if plotvars.proj == 'npstere':
proj = ccrs.NorthPolarStereo(central_longitude=lon_0)
pole = 90
latpt = boundinglat - latrange/40.0
else:
proj = ccrs.SouthPolarStereo(central_longitude=lon_0)
pole = -90
latpt = boundinglat + latrange / 40.0
lon_mid, lat_mid = proj.transform_point(0, pole, ccrs.PlateCarree())
if xaxis and axis_label_fontsize > 0.0:
for xtick in lonvals:
label = mapaxis(xtick, xtick, 1)[1][0]
lonr, latr = proj.transform_point(xtick, latpt, ccrs.PlateCarree())
v_align = 'center'
if lonr < 1:
h_align = 'right'
if lonr > 1:
h_align = 'left'
if abs(lonr) <= 1:
h_align = 'center'
if latr < 1:
v_align = 'top'
if latr > 1:
v_align = 'bottom'
mymap.text(lonr, latr, label, horizontalalignment=h_align,
verticalalignment=v_align,
fontsize=axis_label_fontsize,
fontweight=axis_label_fontweight, zorder=101)
# Make the plot circular by blanking off around the plot
# Find min and max of plotting region in map coordinates
lons = np.arange(360)
lats = np.zeros(np.size(lons))+boundinglat
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
xmin = np.min(device_coords[:, 0])
xmax = np.max(device_coords[:, 0])
ymin = np.min(device_coords[:, 1])
ymax = np.max(device_coords[:, 1])
# blank off data past the bounding latitude
pts = np.where(device_coords[:, 0] >= 0.0)
xpts = np.append(device_coords[:, 0][pts], np.zeros(np.size(pts)) + xmax)
ypts = np.append(device_coords[:, 1][pts], device_coords[:, 1][pts][::-1])
mymap.fill(xpts, ypts, alpha=1.0, color='w', zorder=100)
xpts = np.append(np.zeros(np.size(pts)) + xmin, -1.0 * device_coords[:, 0][pts])
ypts = np.append(device_coords[:, 1][pts], device_coords[:, 1][pts][::-1])
mymap.fill(xpts, ypts, alpha=1.0, color='w', zorder=100)
        # Turn off the map outline outside the circular plot area
mymap.outline_patch.set_visible(False)
# Draw a line around the bounding latitude
lons = np.arange(361)
lats = np.zeros(np.size(lons)) + boundinglat
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
mymap.plot(device_coords[:, 0], device_coords[:, 1], color='k',
zorder=100, clip_on=False)
# Modify xlim and ylim values as the default values clip the plot slightly
xmax = np.max(np.abs(mymap.set_xlim(None)))
mymap.set_xlim((-xmax, xmax), emit=False)
ymax = np.max(np.abs(mymap.set_ylim(None)))
mymap.set_ylim((-ymax, ymax), emit=False)
# Lambert conformal
if plotvars.proj == 'lcc':
lon_0 = plotvars.lonmin+(plotvars.lonmax-plotvars.lonmin)/2.0
lat_0 = plotvars.latmin+(plotvars.latmax-plotvars.latmin)/2.0
mymap = plotvars.mymap
standard_parallels = [33, 45]
if latmin <= 0 and latmax <= 0:
standard_parallels = [-45, -33]
proj = ccrs.LambertConformal(central_longitude=lon_0,
central_latitude=lat_0,
cutoff=40,
standard_parallels=standard_parallels)
lonmin = plotvars.lonmin
lonmax = plotvars.lonmax
latmin = plotvars.latmin
latmax = plotvars.latmax
# Modify xlim and ylim values as the default values clip the plot slightly
xmin = mymap.set_xlim(None)[0]
xmax = mymap.set_xlim(None)[1]
ymin = mymap.set_ylim(None)[0]
ymax = mymap.set_ylim(None)[1]
mymap.set_ylim(ymin*1.05, ymax, emit=False)
mymap.set_ylim(None)
        # Mask off contours that appear because of the plot extension
# mymap.add_patch(mpatches.Polygon([[xmin, ymin], [xmax,ymin],
# [xmax, ymin*1.05], [xmin, ymin*1.05]],
# facecolor='red'))
# transform=ccrs.PlateCarree()))
lons = np.arange(lonmax-lonmin+1) + lonmin
lats = np.arange(latmax-latmin+1) + latmin
verts = []
for lat in lats:
verts.append([lonmin, lat])
for lon in lons:
verts.append([lon, latmax])
for lat in lats[::-1]:
verts.append([lonmax, lat])
for lon in lons[::-1]:
verts.append([lon, latmin])
# Mask left and right of plot
lats = np.arange(latmax-latmin+1) + latmin
lons = np.zeros(np.size(lats)) + lonmin
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
xmin = np.min(device_coords[:, 0])
xmax = np.max(device_coords[:, 0])
if lat_0 > 0:
ymin = np.min(device_coords[:, 1])
ymax = np.max(device_coords[:, 1])
else:
ymin = np.max(device_coords[:, 1])
ymax = np.min(device_coords[:, 1])
# Left
mymap.fill([xmin, xmin, xmax, xmin],
[ymin, ymax, ymax, ymin],
alpha=1.0, color='w', zorder=100)
mymap.plot([xmin, xmax], [ymin, ymax], color='k', zorder=101, clip_on=False)
# Right
mymap.fill([-xmin, -xmin, -xmax, -xmin],
[ymin, ymax, ymax, ymin],
alpha=1.0, color='w', zorder=100)
mymap.plot([-xmin, -xmax], [ymin, ymax], color='k', zorder=101, clip_on=False)
# Upper
lons = np.arange(lonmax-lonmin+1) + lonmin
lats = np.zeros(np.size(lons)) + latmax
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
ymax = np.max(device_coords[:, 1])
xpts = np.append(device_coords[:, 0], device_coords[:, 0][::-1])
ypts = np.append(device_coords[:, 1], np.zeros(np.size(lons))+ymax)
mymap.fill(xpts, ypts, alpha=1.0, color='w', zorder=100)
mymap.plot(device_coords[:, 0], device_coords[:, 1], color='k', zorder=101, clip_on=False)
# Lower
lons = np.arange(lonmax-lonmin+1) + lonmin
lats = np.zeros(np.size(lons)) + latmin
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
ymin = np.min(device_coords[:, 1]) * 1.05
xpts = np.append(device_coords[:, 0], device_coords[:, 0][::-1])
ypts = np.append(device_coords[:, 1], np.zeros(np.size(lons))+ymin)
mymap.fill(xpts, ypts, alpha=1.0, color='w', zorder=100)
mymap.plot(device_coords[:, 0], device_coords[:, 1], color='k', zorder=101, clip_on=False)
# Turn off drawing of the rectangular box around the plot
mymap.outline_patch.set_visible(False)
if lat_0 < 0:
lons = np.arange(lonmax - lonmin + 1) + lonmin
lats = np.zeros(np.size(lons)) + latmax
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
xmin = np.min(device_coords[:, 0])
xmax = np.max(device_coords[:, 0])
lons = np.arange(lonmax-lonmin+1) + lonmin
lats = np.zeros(np.size(lons)) + latmin
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
ymax = np.min(device_coords[:, 1])
ymin = ymax * 1.1
xpts = [xmin, xmax, xmax, xmin, xmin]
ypts = [ymin, ymin, ymax, ymax, ymin]
mymap.fill(xpts, ypts, alpha=1.0, color='w', zorder=100)
# Draw longitudes and latitudes if requested
fs = plotvars.axis_label_fontsize
fw = plotvars.axis_label_fontweight
if axes and xaxis:
if xticks is None:
map_xticks, map_xticklabels = mapaxis(min=plotvars.lonmin,
max=plotvars.lonmax, type=1)
else:
map_xticks = xticks
if xticklabels is None:
map_xticklabels = xticks
else:
map_xticklabels = xticklabels
if axes and xaxis:
lats = np.arange(latmax - latmin + 1) + latmin
for tick in np.arange(np.size(map_xticks)):
lons = np.zeros(np.size(lats)) + map_xticks[tick]
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
mymap.plot(device_coords[:, 0], device_coords[:, 1],
linewidth=plotvars.grid_thickness,
linestyle=plotvars.grid_linestyle,
color=plotvars.grid_colour,
zorder=101)
latpt = latmin - 3
if lat_0 < 0:
latpt = latmax + 1
device_coords = proj.transform_point(map_xticks[tick], latpt,
ccrs.PlateCarree())
mymap.text(device_coords[0], device_coords[1],
map_xticklabels[tick],
horizontalalignment='center',
fontsize=fs,
fontweight=fw,
zorder=101)
if yticks is None:
map_yticks, map_yticklabels = mapaxis(min=plotvars.latmin,
max=plotvars.latmax,
type=2)
else:
map_yticks = yticks
if yticklabels is None:
map_yticklabels = yticks
else:
map_yticklabels = yticklabels
if axes and yaxis:
lons = np.arange(lonmax-lonmin+1) + lonmin
for tick in np.arange(np.size(map_yticks)):
lats = np.zeros(np.size(lons)) + map_yticks[tick]
device_coords = proj.transform_points(ccrs.PlateCarree(), lons, lats)
mymap.plot(device_coords[:, 0],
device_coords[:, 1],
linewidth=plotvars.grid_thickness,
linestyle=plotvars.grid_linestyle,
color=plotvars.grid_colour,
zorder=101)
device_coords = proj.transform_point(lonmin-1,
map_yticks[tick],
ccrs.PlateCarree())
mymap.text(device_coords[0],
device_coords[1],
map_yticklabels[tick],
horizontalalignment='right',
verticalalignment='center',
fontsize=fs,
fontweight=fw,
zorder=101)
device_coords = proj.transform_point(lonmax+1,
map_yticks[tick],
ccrs.PlateCarree())
mymap.text(device_coords[0],
device_coords[1],
map_yticklabels[tick],
horizontalalignment='left',
verticalalignment='center',
fontsize=fs,
fontweight=fw,
zorder=101)
# UKCP grid
if plotvars.proj == 'UKCP' and plotvars.grid:
lonmin = -11
lonmax = 3
latmin = 49
latmax = 61
spacing = plotvars.grid_spacing
if xticks is None:
lons = np.arange(30 / spacing + 1) * spacing
lons = np.append((lons*-1)[::-1], lons[1:])
else:
lons = xticks
if yticks is None:
lats = np.arange(90.0 / spacing + 1) * spacing
else:
lats = yticks
if plotvars.grid:
plotvars.mymap.gridlines(color=plotvars.grid_colour,
linewidth=plotvars.grid_thickness,
linestyle=plotvars.grid_linestyle,
xlocs=lons, ylocs=lats)
def add_cyclic(field, lons):
"""
| add_cyclic is a wrapper for cartopy_util.add_cyclic_point(field, lons)
| This is needed for the case of when the longitudes are not evenly spaced
| due to numpy rounding which causes an error from the cartopy wrapping routine.
| In this case the longitudes are promoted to 64 bit and then rounded
| to an appropriate number of decimal places before passing to the cartopy
| add_cyclic routine.
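    |
    | Example (illustrative; field is a 2D array and lons its longitude
    | coordinates):
    |   field, lons = add_cyclic(field, lons)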
"""
try:
field, lons = cartopy_util.add_cyclic_point(field, lons)
except Exception:
ndecs_max = max_ndecs_data(lons)
lons = np.float64(lons).round(ndecs_max)
field, lons = cartopy_util.add_cyclic_point(field, lons)
return field, lons
def ugrid_window(field, lons, lats):
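    """
    | ugrid_window is an internal routine that windows UGRID (unstructured)
    | data to the current map limits. It remaps the longitudes into the plot
    | range, wraps points near the left-hand edge and inserts a line of
    | interpolated values at the plot edges so that contouring is continuous.
    | (Docstring added for clarity; the description is inferred from the code
    | below.)
    """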
field_ugrid = deepcopy(field)
lons_ugrid = deepcopy(lons)
lats_ugrid = deepcopy(lats)
# Fix longitudes to be -180 to 180
# lons_ugrid = ((lons_ugrid + plotvars.lonmin) % 360) + plotvars.lonmin
    # Test the data to get an appropriate longitude offset to perform the remapping
found_lon = False
for ilon in [-360, 0, 360]:
lons_test = lons_ugrid + ilon
if np.min(lons_test) <= plotvars.lonmin:
found_lon = True
lons_offset = ilon
if found_lon:
lons_ugrid = lons_ugrid + lons_offset
pts = np.where(lons_ugrid < plotvars.lonmin)
lons_ugrid[pts] = lons_ugrid[pts] + 360.0
else:
        errstr = '\n\ncf-plot error - cannot determine grid offset in ugrid_window\n\n'
raise Warning(errstr)
field_wrap = deepcopy(field_ugrid)
lons_wrap = deepcopy(lons_ugrid)
lats_wrap = deepcopy(lats_ugrid)
delta = 120.0
pts_left = np.where(lons_wrap >= plotvars.lonmin + 360 - delta)
lons_left = lons_wrap[pts_left] - 360.0
lats_left = lats_wrap[pts_left]
field_left = field_wrap[pts_left]
field_wrap = np.concatenate([field_wrap, field_left])
lons_wrap = np.concatenate([lons_wrap, lons_left])
lats_wrap = np.concatenate([lats_wrap, lats_left])
# Make a line of interpolated data on left hand side of plot and insert this into the data
# on both the left and the right before contouring
lons_new = np.zeros(181) + plotvars.lonmin
lats_new = np.arange(181) - 90
field_new = griddata((lons_wrap, lats_wrap), field_wrap, (lons_new, lats_new), method='linear')
# Remove any non finite points in the interpolated data
pts = np.where(np.isfinite(field_new))
field_new = field_new[pts]
lons_new = lons_new[pts]
lats_new = lats_new[pts]
# Add the interpolated data to the left
field_ugrid = np.concatenate([field_ugrid, field_new])
lons_ugrid = np.concatenate([lons_ugrid, lons_new])
lats_ugrid = np.concatenate([lats_ugrid, lats_new])
    # Add to the right if a full globe is being plotted
    # The 359.95 offset here is needed or Cartopy will map 360 back to 0
if plotvars.lonmax - plotvars.lonmin == 360:
field_ugrid = np.concatenate([field_ugrid, field_new])
lons_ugrid = np.concatenate([lons_ugrid, lons_new + 359.95])
lats_ugrid = np.concatenate([lats_ugrid, lats_new])
else:
lons_new2 = np.zeros(181) + plotvars.lonmax
lats_new2 = np.arange(181) - 90
field_new2 = griddata((lons_wrap, lats_wrap), field_wrap, (lons_new2, lats_new2), method='linear')
# Remove any non finite points in the interpolated data
pts = np.where(np.isfinite(field_new2))
field_new2 = field_new2[pts]
lons_new2 = lons_new2[pts]
lats_new2 = lats_new2[pts]
# Add the interpolated data to the right
field_ugrid = np.concatenate([field_ugrid, field_new2])
lons_ugrid = np.concatenate([lons_ugrid, lons_new2])
lats_ugrid = np.concatenate([lats_ugrid, lats_new2])
# Finally remove any point off to the right of plotvars.lonmax
pts = np.where(lons_ugrid <= plotvars.lonmax)
if np.size(pts) > 0:
field_ugrid = field_ugrid[pts]
lons_ugrid = lons_ugrid[pts]
lats_ugrid = lats_ugrid[pts]
return field_ugrid, lons_ugrid, lats_ugrid
def max_ndecs_data(data):
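    """
    | max_ndecs_data returns the largest number of decimal places found in the
    | supplied values, treating entries with ten or more decimals as numpy
    | rounding artefacts and ignoring them.
    | (Docstring added for clarity; inferred from the code below.)
    """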
ndecs_max = 1
data_ndecs = np.zeros(len(data))
for i in np.arange(len(data)):
data_ndecs[i] = len(str(data[i]).split('.')[1])
if max(data_ndecs) >= ndecs_max:
        # Reset large decimal values to zero
if min(data_ndecs) < 10:
pts = np.where(data_ndecs >= 10)
data_ndecs[pts] = 0
ndecs_max = int(max(data_ndecs))
return ndecs_max
def fix_floats(data):
"""
| fix_floats fixes numpy rounding issues where 0.4 becomes
| 0.399999999999999999999
|
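    | For example (illustrative):
    |   fix_floats([0.1, 0.30000000000000004, 0.5]) returns [0.1, 0.3, 0.5]
    |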
"""
    # Return the data unchanged if any values are in exponential notation, for example 7.85e-8
has_e = False
for val in data:
if 'e' in str(val):
has_e = True
if has_e:
return(data)
data_ndecs = np.zeros(len(data))
for i in np.arange(len(data)):
data_ndecs[i] = len(str(float(data[i])).split('.')[1])
if max(data_ndecs) >= 10:
        # Reset large decimal values to zero
if min(data_ndecs) < 10:
pts = np.where(data_ndecs >= 10)
data_ndecs[pts] = 0
ndecs_max = int(max(data_ndecs))
# Reset to new ndecs_max decimal places
for i in np.arange(len(data)):
data[i] = round(data[i], ndecs_max)
else:
# fix to two or more decimal places
nd = 2
data_range = 0.0
data_temp = data
while data_range == 0.0:
data_temp = deepcopy(data)
for i in np.arange(len(data_temp)):
data_temp[i] = round(data_temp[i], nd)
data_range = np.max(data_temp) - np.min(data_temp)
nd = nd + 1
data = data_temp
return(data)
def calculate_levels(field=None, level_spacing=None, verbose=None):
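    """
    | calculate_levels is an internal routine that works out the contour levels
    | for a field. User levels (cfp.levs) and a user level step take precedence;
    | otherwise levels are generated according to level_spacing, which may be
    | 'linear', 'log', 'loglike', 'outlier' or 'inspect'.
    | Returns (clevs, mult, fmult).
    | (Docstring added for clarity; inferred from the code below.)
    """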
dmin = np.nanmin(field)
dmax = np.nanmax(field)
tight = True
field2 = deepcopy(field)
if plotvars.user_levs == 1:
# User defined
if verbose:
print('cfp.calculate_levels - using user defined contour levels')
clevs = plotvars.levels
mult = 0
fmult = 1
else:
if plotvars.levels_step is None:
# Automatic levels
mult = 0
fmult = 1
if verbose:
print('cfp.calculate_levels - generating automatic contour levels')
if level_spacing == 'outlier' or level_spacing == 'inspect':
hist = np.histogram(field, 100)[0]
pts = np.size(field)
rate = 0.01
outlier_detected = False
                if sum(hist[1:-2]) == 0:
if hist[0] / hist[-1] < rate:
outlier_detected = True
pts = np.where(field == dmin)
field2[pts] = dmax
dmin = np.nanmin(field2)
if hist[-1] / hist[0] < rate:
outlier_detected = True
pts = np.where(field == dmax)
field2[pts] = dmin
dmax = np.nanmax(field2)
clevs, mult = gvals(dmin=dmin, dmax=dmax)
fmult = 10**-mult
tight = False
if level_spacing == 'linear':
clevs, mult = gvals(dmin=dmin, dmax=dmax)
fmult = 10**-mult
tight = False
if level_spacing == 'log' or level_spacing == 'loglike':
if dmin < 0.0 and dmax < 0.0:
dmin1 = abs(dmax)
dmax1 = abs(dmin)
if dmin > 0.0 and dmax > 0.0:
dmin1 = abs(dmin)
dmax1 = abs(dmax)
if dmin <= 0.0 and dmax >= 0.0:
dmax1 = max(abs(dmin), dmax)
pts = np.where(field < 0.0)
close_below = np.max(field[pts])
pts = np.where(field > 0.0)
close_above = np.min(field[pts])
dmin1 = min(abs(close_below), close_above)
# Generate levels
if level_spacing == 'log':
clevs = []
for i in np.arange(31):
val = 10**(i-30.)
clevs.append("{:.0e}".format(val))
if level_spacing == 'loglike':
clevs = []
for i in np.arange(61):
val = 10**(i-30.)
clevs.append("{:.0e}".format(val))
clevs.append("{:.0e}".format(val*2))
clevs.append("{:.0e}".format(val*5))
clevs = np.float64(clevs)
# Remove out of range levels
clevs = np.float64(clevs)
pts = np.where(np.logical_and(clevs >= abs(dmin1), clevs <= abs(dmax1)))
clevs = clevs[pts]
if dmin < 0.0 and dmax < 0.0:
clevs = -1.0*clevs[::-1]
if dmin <= 0.0 and dmax >= 0.0:
clevs = np.concatenate([-1.0*clevs[::-1], [0.0], clevs])
# Use step to generate the levels
if plotvars.levels_step is not None:
if verbose:
print('calculate_levels - using specified step to generate contour levels')
step = plotvars.levels_step
if isinstance(step, int):
dmin = int(dmin)
dmax = int(dmax)
fmult = 1
mult = 0
clevs = []
if dmin < 0:
clevs = ((np.arange(-1*dmin/step+1)*-step)[::-1])
if dmax > 0:
if np.size(clevs) > 0:
clevs = np.concatenate((clevs[:-1], np.arange(dmax/step+1)*step))
else:
clevs = np.arange(dmax/step+1)*step
if isinstance(step, int):
clevs = clevs.astype(int)
# Remove any out of data range values
if tight:
pts = np.where(np.logical_and(clevs >= dmin, clevs <= dmax))
clevs = clevs[pts]
# Add an extra contour level if less than two levels are present
if np.size(clevs) < 2:
        clevs = np.append(clevs, clevs[0] + 0.001)
    # Test for a large number of decimal places and fix if necessary
if plotvars.levels is None:
if isinstance(clevs[0], float):
clevs = fix_floats(clevs)
return(clevs, mult, fmult)
def stream(u=None, v=None, x=None, y=None, density=None, linewidth=None,
color=None, arrowsize=None, arrowstyle=None, minlength=None,
maxlength=None, axes=True,
xaxis=True, yaxis=True, xticks=None, xticklabels=None, yticks=None,
yticklabels=None, xlabel=None, ylabel=None, title=None,
zorder=None):
"""
| stream - plot a streamplot which is used to show fluid flow and 2D field gradients
|
| u=None - u wind
| v=None - v wind
| x=None - x locations of u and v
| y=None - y locations of u and v
| density=None - controls the closeness of streamlines. When density = 1,
| the domain is divided into a 30x30 grid
| linewidth=None - the width of the stream lines. With a 2D array the line width
| can be varied across the grid. The array must have the same shape
| as u and v
| color=None - the streamline color
| arrowsize=None - scaling factor for the arrow size
| arrowstyle=None - arrow style specification
| minlength=None - minimum length of streamline in axes coordinates
| maxlength=None - maximum length of streamline in axes coordinates
| axes=True - plot x and y axes
| xaxis=True - plot xaxis
| yaxis=True - plot y axis
| xticks=None - xtick positions
| xticklabels=None - xtick labels
| yticks=None - y tick positions
| yticklabels=None - ytick labels
| xlabel=None - label for x axis
| ylabel=None - label for y axis
| title=None - title for plot
| zorder=None - plotting order
|
:Returns:
None
|
|
|
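    | Example (illustrative sketch; assumes cf-python fields u and v on a
    | longitude-latitude grid):
    |   import cfplot as cfp
    |   cfp.stream(u, v, density=2, color='k')
    |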
"""
colorbar_title = ''
if title is None:
title = ''
text_fontsize = plotvars.text_fontsize
continent_thickness = plotvars.continent_thickness
continent_color = plotvars.continent_color
if text_fontsize is None:
text_fontsize = 11
if continent_thickness is None:
continent_thickness = 1.5
if continent_color is None:
continent_color = 'k'
title_fontsize = plotvars.title_fontsize
if title_fontsize is None:
title_fontsize = 15
resolution_orig = plotvars.resolution
rotated_vect = False
# Set potential user axis labels
user_xlabel = xlabel
user_ylabel = ylabel
# Set any additional arguments to streamplot
plotargs = {}
if density is not None:
plotargs['density'] = density
if linewidth is not None:
plotargs['linewidth'] = linewidth
if color is not None:
plotargs['color'] = color
if arrowsize is not None:
plotargs['arrowsize'] = arrowsize
if arrowstyle is not None:
plotargs['arrowstyle'] = arrowstyle
if minlength is not None:
plotargs['minlength'] = minlength
if maxlength is not None:
plotargs['maxlength'] = maxlength
# Extract required data
# If a cf-python field
if isinstance(u, cf.Field):
# Check data is 2D
ndims = np.squeeze(u.data).ndim
if ndims != 2:
errstr = "\n\ncfp.vect error need a 2 dimensonal u field to make vectors\n"
errstr += "received " + str(np.squeeze(u.data).ndim)
if ndims == 1:
errstr += " dimension\n\n"
else:
errstr += " dimensions\n\n"
raise TypeError(errstr)
u_data, u_x, u_y, ptype, colorbar_title, xlabel, ylabel, xpole, \
ypole = cf_data_assign(u, colorbar_title, rotated_vect=rotated_vect)
elif isinstance(u, cf.FieldList):
raise TypeError("Can't plot a field list")
else:
# field=f #field data passed in as f
check_data(u, x, y)
u_data = deepcopy(u)
u_x = deepcopy(x)
u_y = deepcopy(y)
xlabel = ''
ylabel = ''
if isinstance(v, cf.Field):
# Check data is 2D
ndims = np.squeeze(v.data).ndim
if ndims != 2:
errstr = "\n\ncfp.vect error need a 2 dimensonal v field to make vectors\n"
errstr += "received " + str(np.squeeze(v.data).ndim)
if ndims == 1:
errstr += " dimension\n\n"
else:
errstr += " dimensions\n\n"
raise TypeError(errstr)
v_data, v_x, v_y, ptype, colorbar_title, xlabel, ylabel, xpole, \
ypole = cf_data_assign(v, colorbar_title, rotated_vect=rotated_vect)
elif isinstance(v, cf.FieldList):
raise TypeError("Can't plot a field list")
else:
# field=f #field data passed in as f
check_data(v, x, y)
v_data = deepcopy(v)
xlabel = ''
ylabel = ''
    # Reset xlabel and ylabel values with user defined labels if specified
if user_xlabel is not None:
xlabel = user_xlabel
if user_ylabel is not None:
ylabel = user_ylabel
# Retrieve any user defined axis labels
if xlabel == '' and plotvars.xlabel is not None:
xlabel = plotvars.xlabel
if ylabel == '' and plotvars.ylabel is not None:
ylabel = plotvars.ylabel
if xticks is None and plotvars.xticks is not None:
xticks = plotvars.xticks
if plotvars.xticklabels is not None:
xticklabels = plotvars.xticklabels
else:
xticklabels = list(map(str, xticks))
if yticks is None and plotvars.yticks is not None:
yticks = plotvars.yticks
if plotvars.yticklabels is not None:
yticklabels = plotvars.yticklabels
else:
yticklabels = list(map(str, yticks))
# Open a new plot if necessary
if plotvars.user_plot == 0:
gopen(user_plot=0)
# Set plot type if user specified
if (ptype is not None):
plotvars.plot_type = ptype
lonrange = np.nanmax(u_x) - np.nanmin(u_x)
latrange = np.nanmax(u_y) - np.nanmin(u_y)
if plotvars.plot_type == 1:
# Set up mapping
if (lonrange > 350 and latrange > 170) or plotvars.user_mapset == 1:
set_map()
else:
mapset(lonmin=np.nanmin(u_x), lonmax=np.nanmax(u_x),
latmin=np.nanmin(u_y), latmax=np.nanmax(u_y),
user_mapset=0, resolution=resolution_orig)
set_map()
mymap = plotvars.mymap
# Map streamplot
if plotvars.plot_type == 1:
plotvars.mymap.streamplot(u_x, u_y, u_data, v_data,
transform=ccrs.PlateCarree(),
**plotargs)
# axes
plot_map_axes(axes=axes, xaxis=xaxis, yaxis=yaxis,
xticks=xticks, xticklabels=xticklabels,
yticks=yticks, yticklabels=yticklabels,
user_xlabel=user_xlabel, user_ylabel=user_ylabel,
verbose=False)
# Coastlines
continent_thickness = plotvars.continent_thickness
continent_color = plotvars.continent_color
continent_linestyle = plotvars.continent_linestyle
if continent_thickness is None:
continent_thickness = 1.5
if continent_color is None:
continent_color = 'k'
if continent_linestyle is None:
continent_linestyle = 'solid'
feature = cfeature.NaturalEarthFeature(
name='land', category='physical',
scale=plotvars.resolution,
facecolor='none')
mymap.add_feature(feature, edgecolor=continent_color,
linewidth=continent_thickness,
linestyle=continent_linestyle)
# Title
if title is not None:
map_title(title)
##########
# Save plot
##########
if plotvars.user_plot == 0:
gset()
cscale()
gclose()
if plotvars.user_mapset == 0:
mapset()
mapset(resolution=resolution_orig)
def bfill_ugrid(f=None, face_lons=None, face_lats=None, face_connectivity=None, clevs=None,
alpha=None, zorder=None):
"""
| bfill_ugrid - block fill a UGRID field with colour rectangles
| This is an internal routine and is not generally used by the user.
|
| f=None - field
| face_lons=None - longitude points for face vertices
    | face_lats=None - latitude points for face vertices
    | face_connectivity=None - connectivity for face vertices
    | clevs=None - levels for filling
    | alpha=None - transparency setting 0 to 1
| zorder=None - plotting order
|
:Returns:
None
|
|
|
|
"""
# Colour faces according to value
    # Set faces to black initially
cols = ['#000000' for x in range(len(face_connectivity))]
levs = deepcopy(np.array(clevs))
if plotvars.levels_extend == 'min' or plotvars.levels_extend == 'both':
levs = np.concatenate([[-1e20], levs])
ilevs_max = np.size(levs)
if plotvars.levels_extend == 'max' or plotvars.levels_extend == 'both':
levs = np.concatenate([levs, [1e20]])
else:
ilevs_max = ilevs_max - 1
for ilev in np.arange(ilevs_max):
lev = levs[ilev]
col = plotvars.cs[ilev]
pts = np.where(f.squeeze() >= lev)[0]
        if np.size(pts) > 0:
            for val in np.arange(np.size(pts)):
                pt = pts[val]
                cols[pt] = col
plotargs = {'transform': ccrs.PlateCarree()}
coords_all = []
for iface in np.arange(len(face_connectivity)):
lons = np.array([face_lons[i] for i in face_connectivity[iface]])
lats = np.array([face_lats[i] for i in face_connectivity[iface]])
coords = [(lons[i], lats[i]) for i in np.arange(len(lons))]
if (np.max(lons) - np.min(lons)) > 100:
if np.max(lons) > 180:
for i in np.arange(len(lons)):
lons[i] = (lons[i] + 180) % 360 - 180
else:
for i in np.arange(len(lons)):
lons[i] = lons[i] % 360
coords = [(lons[i], lats[i]) for i in np.arange(len(lons))]
        # Add extra vertices if any of the points are at the north or south pole
if np.max(lats) == 90 or np.min(lats) == -90:
geom = sgeom.Polygon([(face_lons[i], face_lats[i]) for i in face_connectivity[iface]])
geom_cyl = ccrs.PlateCarree().project_geometry(geom, ccrs.Geodetic())
coords = geom_cyl[0].exterior.coords[:]
coords_all.append(coords)
plotvars.mymap.add_collection(PolyCollection(coords_all, facecolors=cols, edgecolors=None,
alpha=alpha, zorder=zorder, **plotargs))
| mit |
ephes/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
paalge/scikit-image | doc/examples/features_detection/plot_multiblock_local_binary_pattern.py | 9 | 2603 | """
===========================================================
Multi-Block Local Binary Pattern for texture classification
===========================================================
This example shows how to compute multi-block local binary pattern (MB-LBP)
features as well as how to visualize them.
The features are calculated similarly to local binary patterns (LBPs), except
that summed blocks are used instead of individual pixel values.
MB-LBP is an extension of LBP that can be computed on multiple scales in
constant time using the integral image. 9 equally-sized rectangles are used to
compute a feature. For each rectangle, the sum of the pixel intensities is
computed. Comparisons of these sums to that of the central rectangle determine
the feature, similarly to LBP (See `LBP <plot_local_binary_pattern.html>`_).
First, we generate an image to illustrate the functioning of MB-LBP: consider
a (9, 9) rectangle and divide it into (3, 3) block, upon which we then apply
MB-LBP.
"""
from __future__ import print_function
from skimage.feature import multiblock_lbp
import numpy as np
from numpy.testing import assert_equal
from skimage.transform import integral_image
# Create test matrix where first and fifth rectangles starting
# from top left clockwise have greater value than the central one.
test_img = np.zeros((9, 9), dtype='uint8')
test_img[3:6, 3:6] = 1
test_img[:3, :3] = 50
test_img[6:, 6:] = 50
# First and fifth bits should be filled. This correct value will
# be compared to the computed one.
correct_answer = 0b10001000
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 3, 3)
assert_equal(correct_answer, lbp_code)
######################################################################
# Now let's apply the operator to a real image and see how the visualization
# works.
from skimage import data
from matplotlib import pyplot as plt
from skimage.feature import draw_multiblock_lbp
test_img = data.coins()
int_img = integral_image(test_img)
lbp_code = multiblock_lbp(int_img, 0, 0, 90, 90)
img = draw_multiblock_lbp(test_img, 0, 0, 90, 90,
lbp_code=lbp_code, alpha=0.5)
plt.imshow(img, interpolation='nearest')
plt.show()
######################################################################
# On the above plot we see the result of computing a MB-LBP and visualization
# of the computed feature. The rectangles that have less intensities' sum
# than the central rectangle are marked in cyan. The ones that have higher
# intensity values are marked in white. The central rectangle is left
# untouched.
| bsd-3-clause |
felipeacsi/python-acoustics | acoustics/_signal.py | 1 | 36315 | import itertools
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile
from scipy.signal import detrend, lfilter, bilinear, spectrogram, filtfilt, resample, fftconvolve
import acoustics
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
from acoustics.standards.iec_61672_1_2013 import WEIGHTING_SYSTEMS
from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES,
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES)
class Signal(np.ndarray):
"""A signal consisting of samples (array) and a sample frequency (float).
"""
def __new__(cls, data, fs):
obj = np.asarray(data).view(cls)
obj.fs = fs
return obj
def __array_prepare__(self, array, context=None):
try:
a = context[1][0]
b = context[1][1]
except IndexError:
return array
if hasattr(a, 'fs') and hasattr(b, 'fs'):
if a.fs == b.fs:
return array
else:
raise ValueError("Sample frequencies do not match.")
else:
return array
def __array_wrap__(self, out_arr, context=None):
return np.ndarray.__array_wrap__(self, out_arr, context)
def __array_finalize__(self, obj):
# see InfoArray.__array_finalize__ for comments
if obj is None:
return
self.fs = getattr(obj, 'fs', None)
def __reduce__(self):
# Get the parent's __reduce__ tuple
pickled_state = super(Signal, self).__reduce__()
# Create our own tuple to pass to __setstate__
new_state = pickled_state[2] + (self.fs, )
# Return a tuple that replaces the parent's __setstate__ tuple with our own
return (pickled_state[0], pickled_state[1], new_state)
def __setstate__(self, state):
self.fs = state[-1] # Set the info attribute
# Call the parent's __setstate__ with the other tuple elements.
super(Signal, self).__setstate__(state[0:-1])
def __repr__(self):
return "Signal({})".format(str(self))
def _construct(self, x):
"""Construct signal like x."""
return Signal(x, self.fs)
@property
def samples(self):
"""Amount of samples in signal."""
return self.shape[-1]
@property
def channels(self):
"""Amount of channels.
"""
if self.ndim > 1:
return self.shape[-2]
else:
return 1
@property
def duration(self):
"""Duration of signal in seconds.
"""
return float(self.samples / self.fs)
@property
def values(self):
"""Return the values of this signal as an instance of :class:`np.ndarray`."""
return np.array(self)
def calibrate_to(self, decibel, inplace=False):
"""Calibrate signal to value `decibel`.
:param decibel: Value to calibrate to.
:param inplace: Whether to perform inplace or not.
:returns: Calibrated signal.
:rtype: :class:`Signal`
Values of `decibel` are broadcasted. To set a value per channel, use `decibel[...,None]`.
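
        Example (illustrative; ``signal`` is an existing :class:`Signal`)::

            calibrated = signal.calibrate_to(94.0)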
"""
decibel = decibel * np.ones(self.shape)
gain = decibel - self.leq()[..., None]
return self.gain(gain, inplace=inplace)
def calibrate_with(self, other, decibel, inplace=False):
"""Calibrate signal with other signal.
:param other: Other signal/array.
:param decibel: Signal level of `other`.
:param inplace: Whether to perform inplace or not.
:returns: calibrated signal.
:rtype: :class:`Signal`
"""
if not isinstance(other, Signal):
other = Signal(other, self.fs)
gain = decibel - other.leq()
return self.gain(gain, inplace=inplace)
def decimate(self, factor, zero_phase=False, ftype='iir', order=None):
"""Decimate signal by integer `factor`. Before downsampling a low-pass filter is applied.
:param factor: Downsampling factor.
:param zero_phase: Prevent phase shift by filtering with ``filtfilt`` instead of ``lfilter``.
:param ftype: Filter type.
:param order: Filter order.
:returns: Decimated signal.
:rtype: :class:`Signal`
.. seealso:: :func:`scipy.signal.decimate`
.. seealso:: :meth:`resample`
"""
return Signal(
acoustics.signal.decimate(x=self, q=factor, n=order, ftype=ftype, zero_phase=zero_phase), self.fs / factor)
def resample(self, nsamples, times=None, axis=-1, window=None):
"""Resample signal.
        :param nsamples: New number of samples.
:param times: Times corresponding to samples.
:param axis: Axis.
:param window: Window.
.. seealso:: :func:`scipy.signal.resample`
.. seealso:: :meth:`decimate`
You might want to low-pass filter this signal before resampling.
"""
return Signal(resample(self, nsamples, times, axis, window), nsamples / self.samples * self.fs)
def upsample(self, factor, axis=-1):
"""Upsample signal with integer factor.
:param factor: Upsample factor.
:param axis: Axis.
.. seealso:: :meth:`resample`
"""
return self.resample(int(self.samples * factor), axis=axis)
def gain(self, decibel, inplace=False):
"""Apply gain of `decibel` decibels.
:param decibel: Decibels
:param inplace: In place
:returns: Amplified signal.
:rtype: :class:`Signal`
"""
factor = 10.0**(decibel / 20.0)
if inplace:
self *= factor
return self
else:
return self * factor
def pick(self, start=0.0, stop=None):
"""Get signal from start time to stop time.
:param start: Start time.
:type start: float
:param stop: End time.
:type stop: float
:returns: Selected part of the signal.
:rtype: :class:`Signal`
"""
if start is not None:
start = int(np.floor(start * self.fs))
if stop is not None:
stop = int(np.floor(stop * self.fs))
return self[..., start:stop]
def times(self):
"""Time vector.
:returns: A vector with a timestamp for each sample.
:rtype: :class:`np.ndarray`
"""
return np.arange(0, self.samples) / self.fs
def energy(self):
"""Signal energy.
        :returns: Total energy of the signal, summed over all samples and channels.
        :rtype: :class:`float`
.. math:: E = \\sum_{n=0}^{N-1} |x_n|^2
"""
return float((self * self).sum())
def power(self):
"""Signal power.
.. math:: P = \\frac{1}{N} \\sum_{n=0}^{N-1} |x_n|^2
"""
return self.energy() / len(self)
def ms(self):
"""Mean value squared of signal.
.. seealso:: :func:`acoustics.signal.ms`
"""
return acoustics.signal.ms(self)
def rms(self):
"""Root mean squared of signal.
.. seealso:: :func:`acoustics.signal.rms`
"""
return acoustics.signal.rms(self)
#return np.sqrt(self.power())
def weigh(self, weighting='A', zero_phase=False):
"""Apply frequency-weighting. By default 'A'-weighting is applied.
:param weighting: Frequency-weighting filter to apply.
Valid options are 'A', 'C' and 'Z'. Default weighting is 'A'.
:returns: Weighted signal.
:rtype: :class:`Signal`.
By default the weighting filter is applied using
:func:`scipy.signal.lfilter` causing a frequency-dependent delay. In case a
delay is undesired, the filter can be applied using :func:`scipy.signal.filtfilt`
by setting `zero_phase=True`.
"""
num, den = WEIGHTING_SYSTEMS[weighting]()
b, a = bilinear(num, den, self.fs)
func = filtfilt if zero_phase else lfilter
return self._construct(func(b, a, self))
def correlate(self, other=None, mode='full'):
"""Correlate signal with `other` signal. In case `other==None` this
method returns the autocorrelation.
:param other: Other signal.
:param mode: Mode.
.. seealso:: :func:`np.correlate`, :func:`scipy.signal.fftconvolve`
"""
if other is None:
other = self
if self.fs != other.fs:
raise ValueError("Cannot correlate. Sample frequencies are not the same.")
if self.channels > 1 or other.channels > 1:
raise ValueError("Cannot correlate. Not supported for multichannel signals.")
return self._construct(fftconvolve(self, other[::-1], mode=mode))
def amplitude_envelope(self):
"""Amplitude envelope.
:returns: Amplitude envelope of signal.
:rtype: :class:`Signal`
.. seealso:: :func:`acoustics.signal.amplitude_envelope`
"""
return self._construct(acoustics.signal.amplitude_envelope(self, self.fs))
def instantaneous_frequency(self):
"""Instantaneous frequency.
:returns: Instantaneous frequency of signal.
:rtype: :class:`Signal`
.. seealso:: :func:`acoustics.signal.instantaneous_frequency`
"""
return self._construct(acoustics.signal.instantaneous_frequency(self, self.fs))
def instantaneous_phase(self):
"""Instantaneous phase.
:returns: Instantaneous phase of signal.
:rtype: :class:`Signal`
.. seealso:: :func:`acoustics.signal.instantaneous_phase`
"""
return self._construct(acoustics.signal.instantaneous_phase(self, self.fs))
def detrend(self, **kwargs):
"""Detrend signal.
:returns: Detrended version of signal.
:rtype: :class:`Signal`
.. seealso:: :func:`scipy.signal.detrend`
"""
return self._construct(detrend(self, **kwargs))
def unwrap(self):
"""Unwrap signal in case the signal represents wrapped phase.
:returns: Unwrapped signal.
:rtype: :class:`Signal`
.. seealso:: :func:`np.unwrap`
"""
return self._construct(np.unwrap(self))
def complex_cepstrum(self, N=None):
"""Complex cepstrum.
:param N: Amount of bins.
:returns: Quefrency, complex cepstrum and delay in amount of samples.
.. seealso:: :func:`acoustics.cepstrum.complex_cepstrum`
"""
if N is not None:
times = np.linspace(0.0, self.duration, N, endpoint=False)
else:
times = self.times()
cepstrum, ndelay = acoustics.cepstrum.complex_cepstrum(self, n=N)
return times, cepstrum, ndelay
def real_cepstrum(self, N=None):
"""Real cepstrum.
:param N: Amount of bins.
:returns: Quefrency and real cepstrum.
.. seealso:: :func:`acoustics.cepstrum.real_cepstrum`
"""
if N is not None:
times = np.linspace(0.0, self.duration, N, endpoint=False)
else:
times = self.times()
return times, acoustics.cepstrum.real_cepstrum(self, n=N)
def power_spectrum(self, N=None):
"""Power spectrum.
:param N: Amount of bins.
.. seealso:: :func:`acoustics.signal.power_spectrum`
"""
return acoustics.signal.power_spectrum(self, self.fs, N=N)
def angle_spectrum(self, N=None):
"""Phase angle spectrum. Wrapped.
:param N: amount of bins.
.. seealso::
:func:`acoustics.signal.angle_spectrum`, :func:`acoustics.signal.phase_spectrum`
and :meth:`phase_spectrum`.
"""
return acoustics.signal.angle_spectrum(self, self.fs, N=N)
def phase_spectrum(self, N=None):
"""Phase spectrum. Unwrapped.
:param N: Amount of bins.
.. seealso::
:func:`acoustics.signal.phase_spectrum`, :func:`acoustics.signal.angle_spectrum`
and :meth:`angle_spectrum`.
"""
return acoustics.signal.phase_spectrum(self, self.fs, N=N)
def peak(self, axis=-1):
"""Peak sound pressure.
:param axis: Axis.
.. seealso::
:func:`acoustic.standards.iso_tr_25417_2007.peak_sound_pressure`
"""
return acoustics.standards.iso_tr_25417_2007.peak_sound_pressure(self, axis=axis)
def peak_level(self, axis=-1):
"""Peak sound pressure level.
:param axis: Axis.
.. seealso::
:func:`acoustics.standards.iso_tr_25417_2007.peak_sound_pressure_level`
"""
return acoustics.standards.iso_tr_25417_2007.peak_sound_pressure_level(self, axis=axis)
def min(self, axis=-1):
"""Return the minimum along a given axis.
Refer to `np.amin` for full documentation.
"""
return np.ndarray.min(self, axis=axis)
def max(self, axis=-1):
"""Return the minimum along a given axis.
Refer to `np.amax` for full documentation.
"""
return np.ndarray.max(self, axis=axis)
def max_level(self, axis=-1):
"""Maximum sound pressure level.
:param axis: Axis.
.. seealso:: :func:`acoustics.standards.iso_tr_25417_2007.max_sound_pressure_level`
"""
return acoustics.standards.iso_tr_25417_2007.max_sound_pressure_level(self, axis=axis)
def sound_exposure(self, axis=-1):
"""Sound exposure.
:param axis: Axis.
.. seealso:: :func:`acoustics.standards.iso_tr_25417_2007.sound_exposure`
"""
return acoustics.standards.iso_tr_25417_2007.sound_exposure(self, self.fs, axis=axis)
def sound_exposure_level(self, axis=-1):
"""Sound exposure level.
:param axis: Axis.
.. seealso:: :func:`acoustics.standards.iso_tr_25417_2007.sound_exposure_level`
"""
return acoustics.standards.iso_tr_25417_2007.sound_exposure_level(self, self.fs, axis=axis)
def plot_complex_cepstrum(self, N=None, **kwargs):
"""Plot complex cepstrum of signal.
Valid kwargs:
* xscale
* yscale
* xlim
* ylim
        * frequency: Boolean indicating whether the x-axis should show frequency in Hz instead of quefrency in seconds
* xlabel_frequency: Label in case frequency is shown.
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': "$t$ in s",
'ylabel': "$C$",
'title': 'Complex cepstrum',
'frequency': False,
'xlabel_frequency': "$f$ in Hz",
}
params.update(kwargs)
t, ceps, _ = self.complex_cepstrum(N=N)
if params['frequency']:
t = 1. / t
params['xlabel'] = params['xlabel_frequency']
t = t[::-1]
ceps = ceps[::-1]
return _base_plot(t, ceps, params)
def plot_real_cepstrum(self, N=None, **kwargs):
"""Plot real cepstrum of signal.
Valid kwargs:
* xscale
* yscale
* xlim
* ylim
        * frequency: Boolean indicating whether the x-axis should show frequency in Hz instead of quefrency in seconds
* xlabel_frequency: Label in case frequency is shown.
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': "$t$ in s",
'ylabel': "$C$",
'title': 'Real cepstrum',
'frequency': False,
'xlabel_frequency': "$f$ in Hz",
}
params.update(kwargs)
t, ceps = self.real_cepstrum(N=N)
if params['frequency']:
t = 1. / t
params['xlabel'] = params['xlabel_frequency']
t = t[::-1]
ceps = ceps[::-1]
return _base_plot(t, ceps, params)
def plot_power_spectrum(self, N=None, **kwargs): #filename=None, scale='log'):
"""Plot spectrum of signal.
Valid kwargs:
* xscale
* yscale
* xlim
* ylim
* reference: Reference power
.. seealso:: :meth:`power_spectrum`
"""
params = {
'xscale': 'log',
'yscale': 'linear',
'xlabel': "$f$ in Hz",
'ylabel': "$L_{p}$ in dB",
'title': 'SPL',
'reference': REFERENCE_PRESSURE**2.0,
}
params.update(kwargs)
f, o = self.power_spectrum(N=N)
return _base_plot(f, 10.0 * np.log10(o / params['reference']), params)
def plot_angle_spectrum(self, N=None, **kwargs):
"""Plot phase angle spectrum of signal. Wrapped.
Valid kwargs:
* xscale
* yscale
* xlim
* ylim
* reference: Reference power
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': "$f$ in Hz",
'ylabel': r"$\angle \phi$",
'title': 'Phase response (wrapped)',
}
params.update(kwargs)
f, o = self.angle_spectrum(N=N)
return _base_plot(f, o, params)
def plot_phase_spectrum(self, N=None, **kwargs):
"""Plot phase spectrum of signal. Unwrapped.
Valid kwargs:
* xscale
* yscale
* xlim
* ylim
* reference: Reference power
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': "$f$ in Hz",
'ylabel': r"$\angle \phi$",
'title': 'Phase response (unwrapped)',
}
params.update(kwargs)
f, o = self.phase_spectrum(N=N)
return _base_plot(f, o, params)
def spectrogram(self, **kwargs):
"""Spectrogram of signal.
        :returns: Frequencies, segment times and spectrogram values.
See :func:`scipy.signal.spectrogram`. Some of the default values have been changed.
The generated spectrogram consists by default of complex values.
"""
params = {
'nfft': 4096,
'noverlap': 128,
'mode': 'complex',
}
params.update(kwargs)
        # scipy.signal.spectrogram returns (frequencies, segment times, Sxx).
        f, t, Sxx = spectrogram(self, fs=self.fs, **params)
        return f, t, Sxx
def plot_spectrogram(self, **kwargs):
"""
Plot spectrogram of the signal.
Valid kwargs:
* xlim
* ylim
* clim
.. note:: This method only works for a single channel.
"""
# To do, use :meth:`spectrogram`.
params = {
'xlim': None,
'ylim': None,
'clim': None,
'NFFT': 4096,
'noverlap': 128,
'title': 'Spectrogram',
'xlabel': '$t$ in s',
'ylabel': '$f$ in Hz',
'clabel': 'SPL in dB',
'colorbar': True,
}
params.update(kwargs)
if self.channels > 1:
raise ValueError("Cannot plot spectrogram of multichannel signal. Please select a single channel.")
# Check if an axes object is passed in. Otherwise, create one.
ax0 = params.get('ax', plt.figure().add_subplot(111))
ax0.set_title(params['title'])
data = np.squeeze(self)
try:
_, _, _, im = ax0.specgram(data, Fs=self.fs, noverlap=params['noverlap'], NFFT=params['NFFT'],
mode='magnitude', scale_by_freq=False)
except AttributeError:
raise NotImplementedError(
"Your version of matplotlib is incompatible due to lack of support of the mode keyword argument to matplotlib.mlab.specgram."
)
if params['colorbar']:
cb = ax0.get_figure().colorbar(mappable=im)
cb.set_label(params['clabel'])
ax0.set_xlim(params['xlim'])
ax0.set_ylim(params['ylim'])
im.set_clim(params['clim'])
ax0.set_xlabel(params['xlabel'])
ax0.set_ylabel(params['ylabel'])
return ax0
def levels(self, time=0.125, method='average'):
"""Calculate sound pressure level as function of time.
:param time: Averaging time or integration time constant. Default value is 0.125 corresponding to FAST.
:param method: Use time `average` or time `weighting`. Default option is `average`.
:returns: sound pressure level as function of time.
.. seealso:: :func:`acoustics.standards.iec_61672_1_2013.time_averaged_sound_level`
.. seealso:: :func:`acoustics.standards.iec_61672_1_2013.time_weighted_sound_level`
"""
if method == 'average':
return acoustics.standards.iec_61672_1_2013.time_averaged_sound_level(self.values, self.fs, time)
elif method == 'weighting':
return acoustics.standards.iec_61672_1_2013.time_weighted_sound_level(self.values, self.fs, time)
else:
raise ValueError("Invalid method")
def leq(self):
"""Equivalent level. Single-value number.
.. seealso:: :func:`acoustics.standards.iso_tr_25417_2007.equivalent_sound_pressure_level`
"""
return acoustics.standards.iso_tr_25417_2007.equivalent_sound_pressure_level(self.values)
def plot_levels(self, **kwargs):
"""Plot sound pressure level as function of time.
.. seealso:: :meth:`levels`
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': '$t$ in s',
'ylabel': '$L_{p,F}$ in dB',
'title': 'SPL',
'time': 0.125,
'method': 'average',
'labels': None,
}
params.update(kwargs)
t, L = self.levels(params['time'], params['method'])
L_masked = np.ma.masked_where(np.isinf(L), L)
return _base_plot(t, L_masked, params)
#def octave(self, frequency, fraction=1):
#"""Determine fractional-octave `fraction` at `frequency`.
#.. seealso:: :func:`acoustics.signal.fractional_octaves`
#"""
#return acoustics.signal.fractional_octaves(self, self.fs, frequency,
#frequency, fraction, False)[1]
def bandpass(self, lowcut, highcut, order=8, zero_phase=False):
"""Filter signal with band-pass filter.
        :param lowcut: Lower corner frequency.
        :param highcut: Upper corner frequency.
:param order: Filter order.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Band-pass filtered signal.
:rtype: :class:`Signal`.
.. seealso:: :func:`acoustics.signal.bandpass`
"""
return type(self)(acoustics.signal.bandpass(self, lowcut, highcut, self.fs, order=order, zero_phase=zero_phase),
self.fs)
def bandstop(self, lowcut, highcut, order=8, zero_phase=False):
"""Filter signal with band-stop filter.
        :param lowcut: Lower corner frequency.
        :param highcut: Upper corner frequency.
:param order: Filter order.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Band-pass filtered signal.
:rtype: :class:`Signal`.
.. seealso:: :func:`acoustics.signal.bandstop`
"""
return type(self)(acoustics.signal.bandstop(self, lowcut, highcut, self.fs, order=order, zero_phase=zero_phase),
self.fs)
def highpass(self, cutoff, order=4, zero_phase=False):
"""Filter signal with high-pass filter.
        :param cutoff: Corner frequency.
:param order: Filter order.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: High-pass filtered signal.
:rtype: :class:`Signal`.
.. seealso:: :func:`acoustics.signal.highpass`
"""
return type(self)(acoustics.signal.highpass(self, cutoff, self.fs, order=order, zero_phase=zero_phase), self.fs)
def lowpass(self, cutoff, order=4, zero_phase=False):
"""Filter signal with low-pass filter.
        :param cutoff: Corner frequency.
:param order: Filter order.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Low-pass filtered signal.
:rtype: :class:`Signal`.
.. seealso:: :func:`acoustics.signal.lowpass`
"""
return type(self)(acoustics.signal.lowpass(self, cutoff, self.fs, order=order, zero_phase=zero_phase), self.fs)
def octavepass(self, center, fraction, order=8, zero_phase=False):
"""Filter signal with fractional-octave band-pass filter.
:param center: Center frequency. Any value in the band will suffice.
:param fraction: Band designator.
:param order: Filter order.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Band-pass filtered signal.
:rtype: :class:`Signal`.
.. seealso:: :func:`acoustics.signal.octavepass`
"""
return type(self)(acoustics.signal.octavepass(self, center, self.fs, fraction=fraction, order=order,
zero_phase=zero_phase), self.fs)
def bandpass_frequencies(self, frequencies, order=8, purge=True, zero_phase=False):
"""Apply bandpass filters for frequencies.
:param frequencies: Band-pass filter frequencies.
:type frequencies: Instance of :class:`acoustics.signal.Frequencies`
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Frequencies and band-pass filtered signal.
.. seealso:: :func:`acoustics.signal.bandpass_frequencies`
"""
frequencies, filtered = acoustics.signal.bandpass_frequencies(self, self.fs, frequencies, order, purge,
zero_phase=zero_phase)
return frequencies, type(self)(filtered, self.fs)
def octaves(self, frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES, order=8, purge=True, zero_phase=False):
"""Apply 1/1-octaves bandpass filters.
:param frequencies: Band-pass filter frequencies.
:type frequencies: :class:`np.ndarray` with (approximate) center-frequencies or an instance of :class:`acoustics.signal.Frequencies`
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Frequencies and band-pass filtered signal.
.. seealso:: :func:`acoustics.signal.bandpass_octaves`
"""
frequencies, octaves = acoustics.signal.bandpass_octaves(self, self.fs, frequencies, order, purge,
zero_phase=zero_phase)
return frequencies, type(self)(octaves, self.fs)
def third_octaves(self, frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES, order=8, purge=True, zero_phase=False):
"""Apply 1/3-octaves bandpass filters.
:param frequencies: Band-pass filter frequencies.
:type frequencies: :class:`np.ndarray` with (approximate) center-frequencies or an instance of :class:`acoustics.signal.Frequencies`
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Frequencies and band-pass filtered signal.
.. seealso:: :func:`acoustics.signal.bandpass_third_octaves`
"""
frequencies, octaves = acoustics.signal.bandpass_third_octaves(self, self.fs, frequencies, order, purge,
zero_phase=zero_phase)
return frequencies, type(self)(octaves, self.fs)
def fractional_octaves(self, frequencies=None, fraction=1, order=8, purge=True, zero_phase=False):
"""Apply 1/N-octaves bandpass filters.
:param frequencies: Band-pass filter frequencies.
:type frequencies: Instance of :class:`acoustics.signal.Frequencies`
:param fraction: Default band-designator of fractional-octaves.
:param order: Filter order.
:param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
:param zero_phase: Prevent phase error by filtering in both directions (filtfilt).
:returns: Frequencies and band-pass filtered signal.
.. seealso:: :func:`acoustics.signal.bandpass_fractional_octaves`
"""
if frequencies is None:
frequencies = acoustics.signal.OctaveBand(fstart=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES[0],
fstop=self.fs / 2.0, fraction=fraction)
frequencies, octaves = acoustics.signal.bandpass_fractional_octaves(self, self.fs, frequencies, fraction, order,
purge, zero_phase=zero_phase)
return frequencies, type(self)(octaves, self.fs)
def plot_octaves(self, **kwargs):
"""Plot octaves.
.. seealso:: :meth:`octaves`
"""
params = {
'xscale': 'log',
'yscale': 'linear',
'xlabel': '$f$ in Hz',
'ylabel': '$L_{p}$ in dB',
'title': '1/1-Octaves SPL',
}
params.update(kwargs)
f, o = self.octaves()
print(len(f.center), len(o.leq()))
return _base_plot(f.center, o.leq().T, params)
def plot_third_octaves(self, **kwargs):
"""Plot 1/3-octaves.
.. seealso:: :meth:`third_octaves`
"""
params = {
'xscale': 'log',
'yscale': 'linear',
'xlabel': '$f$ in Hz',
'ylabel': '$L_{p}$ in dB',
'title': '1/3-Octaves SPL',
}
params.update(kwargs)
f, o = self.third_octaves()
return _base_plot(f.center, o.leq().T, params)
def plot_fractional_octaves(self, frequencies=None, fraction=1, order=8, purge=True, zero_phase=False, **kwargs):
"""Plot fractional octaves.
"""
title = '1/{}-Octaves SPL'.format(fraction)
params = {
'xscale': 'log',
'yscale': 'linear',
'xlabel': '$f$ in Hz',
'ylabel': '$L_p$ in dB',
'title': title,
}
params.update(kwargs)
f, o = self.fractional_octaves(frequencies=frequencies, fraction=fraction, order=order, purge=purge,
zero_phase=zero_phase)
return _base_plot(f.center, o.leq().T, params)
def plot(self, **kwargs):
"""Plot signal as function of time. By default the entire signal is plotted.
        Valid kwargs:
        * xscale
        * yscale
        * xlim
        * ylim
        .. seealso:: :meth:`pick` to select a part of the signal before plotting.
"""
params = {
'xscale': 'linear',
'yscale': 'linear',
'xlabel': '$t$ in s',
'ylabel': '$x$ in -',
'title': 'Signal',
}
params.update(kwargs)
return _base_plot(self.times(), self, params)
#def plot_scalo(self, filename=None):
#"""
#Plot scalogram
#"""
#from scipy.signal import ricker, cwt
#wavelet = ricker
#widths = np.logspace(-1, 3.5, 10)
#x = cwt(self, wavelet, widths)
#interpolation = 'nearest'
#from matplotlib.ticker import LinearLocator, AutoLocator, MaxNLocator
#majorLocator = LinearLocator()
#majorLocator = MaxNLocator()
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_title('Scaleogram')
##ax.set_xticks(np.arange(0, x.shape[1])*self.fs)
##ax.xaxis.set_major_locator(majorLocator)
##ax.imshow(10.0 * np.log10(x**2.0), interpolation=interpolation, aspect='auto', origin='lower')#, extent=[0, 1, 0, len(x)])
#ax.pcolormesh(np.arange(0.0, x.shape[1])/self.fs, widths, 10.0*np.log(x**2.0))
#if filename:
#fig.savefig(filename)
#else:
#return fig
#def plot_scaleogram(self, filename):
#"""
#Plot scaleogram
#"""
#import pywt
#wavelet = 'dmey'
#level = pywt.dwt_max_level(len(self), pywt.Wavelet(wavelet))
#print level
#level = 20
#order = 'freq'
#interpolation = 'nearest'
#wp = pywt.WaveletPacket(self, wavelet, 'sym', maxlevel=level)
#nodes = wp.get_level(level, order=order)
#labels = [n.path for n in nodes]
#values = np.abs(np.array([n.data for n in nodes], 'd'))
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.set_title('Scaleogram')
#ax.imshow(values, interpolation=interpolation, aspect='auto', origin='lower', extent=[0, 1, 0, len(values)])
##ax.set_yticks(np.arange(0.5, len(labels) + 0.5))
##ax.set_yticklabels(labels)
#fig.savefig(filename)
def normalize(self, gap=6.0, inplace=False):
"""Normalize signal.
:param gap: Gap between maximum value and ceiling in decibel.
:param inplace: Normalize signal in place.
        The parameter `gap` specifies how many decibels of headroom are left between the maximum value and full scale.
        By default a gap of 6 decibels is used.
"""
factor = (self.max() * 10.0**(gap / 20.0))
if inplace:
self /= factor
return self
else:
return self / factor
def to_wav(self, filename, depth=16):
"""Save signal as WAV file.
:param filename: Name of file to save to.
:param depth: If given, convert to integer with specified depth. Else, try to store using the original data type.
        By default the signal is converted to 16-bit integers. Note that this method does not normalize the signal; use :meth:`normalize` first to leave headroom and avoid clipping.
"""
data = self
dtype = data.dtype if not depth else 'int' + str(depth)
if depth:
            # Scale floats in [-1, 1] to the signed integer range of the given depth.
            data = (data * (2**(depth - 1) - 1)).astype(dtype)
wavfile.write(filename, int(self.fs), data.T)
#wavfile.write(filename, int(self.fs), self._data/np.abs(self._data).max() * 0.5)
#wavfile.write(filename, int(self.fs), np.int16(self._data/(np.abs(self._data).max()) * 32767) )
@classmethod
def from_wav(cls, filename):
"""
        Create an instance of `Signal` from a WAV file.
        The data is converted to floating point and normalized to the range [-1, 1].
        :param filename: Filename
"""
fs, data = wavfile.read(filename)
data = data.astype(np.float32, copy=False).T
data /= np.max(np.abs(data))
return cls(data, fs=fs)
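# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal example of how
# the Signal class above might be used. The sample rate, the random test data
# and the filter settings are illustrative assumptions only.
def _example_signal_usage():
    fs = 44100.0
    noise = np.random.randn(int(fs))           # one second of white noise as toy data
    s = Signal(noise, fs)
    weighted = s.weigh('A')                    # A-weighted copy of the signal
    band = s.bandpass(500.0, 2000.0)           # band-pass filtered copy
    return s.duration, weighted.leq(), band.leq()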
_PLOTTING_PARAMS = {
'title': None,
'xlabel': None,
'ylabel': None,
'xscale': 'linear',
'yscale': 'linear',
'xlim': (None, None),
'ylim': (None, None),
'labels': None,
'linestyles': ['-', '-.', '--', ':'],
}
def _get_plotting_params():
d = dict()
d.update(_PLOTTING_PARAMS)
return d
def _base_plot(x, y, given_params):
"""Common function for creating plots.
:returns: Axes object.
:rtype: :class:`matplotlib.Axes`
"""
params = _get_plotting_params()
params.update(given_params)
linestyles = itertools.cycle(iter(params['linestyles']))
# Check if an axes object is passed in. Otherwise, create one.
ax0 = params.get('ax', plt.figure().add_subplot(111))
ax0.set_title(params['title'])
if y.ndim > 1:
for channel in y:
ax0.plot(x, channel, linestyle=next(linestyles))
else:
ax0.plot(x, y)
ax0.set_xlabel(params['xlabel'])
ax0.set_ylabel(params['ylabel'])
ax0.set_xscale(params['xscale'])
ax0.set_yscale(params['yscale'])
ax0.set_xlim(params['xlim'])
ax0.set_ylim(params['ylim'])
if params['labels'] is None and y.ndim > 1:
params['labels'] = np.arange(y.shape[-2]) + 1
if params['labels'] is not None:
ax0.legend(labels=params['labels'])
return ax0
__all__ = ["Signal"]
| bsd-3-clause |
udacity/ggplot | ggplot/components/smoothers.py | 12 | 2576 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from pandas.lib import Timestamp
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
_isdate = lambda x: isinstance(x, Timestamp)
SPAN = 2/3.
ALPHA = 0.05 # significance level for confidence interval
def snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(snakify, summary_names))
fittedvalues = df['predicted_value']
predict_mean_se = df['std_error_mean_predict']
predict_mean_ci_low = df['mean_ci_95%_low']
predict_mean_ci_upp = df['mean_ci_95%_upp']
predict_ci_low = df['predict_ci_95%_low']
predict_ci_upp = df['predict_ci_95%_upp']
return (x, fittedvalues.tolist(), predict_mean_ci_low.tolist(),
predict_mean_ci_upp.tolist())
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y).tolist()
y2 = pd.Series(upper * std + y).tolist()
return (x, y, y1, y2)
def mavg(x,y, window):
"compute moving average"
x, y = map(plot_friendly, [x,y])
if _isdate(x[0]):
x = np.array([i.toordinal() for i in x])
std_err = pd.rolling_std(y, window)
y = pd.rolling_mean(y, window)
y1 = y - std_err
y2 = y + std_err
return (x, y, y1.tolist(), y2.tolist())
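# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal example of how
# the lm() helper above might be called. The toy data is an assumption, and the
# module's own (older) pandas/statsmodels APIs are assumed to be available.
def _example_lm_usage():
    x = np.arange(30)
    y = 2.0 * x + np.random.normal(0.0, 1.0, 30)     # noisy line as toy data
    x_out, fitted, ci_low, ci_upp = lm(x, y)         # OLS fit plus mean confidence band
    return fitted[:3], ci_low[:3], ci_upp[:3]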
| bsd-2-clause |
appapantula/scikit-learn | sklearn/utils/validation.py | 67 | 24013 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from inspect import getargspec
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
class DataConversionWarning(UserWarning):
"""A warning on implicit data conversions happening in the code"""
pass
warnings.simplefilter("always", DataConversionWarning)
class NonBLASDotWarning(UserWarning):
"""A warning on implicit dispatch to numpy.dot"""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
"""
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type in the list
        is only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
if ensure_2d:
array = np.atleast_2d(array)
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required."
% (n_samples, shape_repr, ensure_min_samples))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required."
% (n_features, shape_repr, ensure_min_features))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s"
% (dtype_orig, array.dtype))
if estimator is not None:
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
msg += " by %s" % estimator
warnings.warn(msg, DataConversionWarning)
return array
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y. For multi-label y,
set multi_output=True to allow 2d and sparse y.
If the dtype of X is object, attempt converting to float,
raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
        If dtype is a list of types, conversion to the first type in the list
        is only performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in getargspec(estimator.fit)[0]
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
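# Editor's sketch (not part of scikit-learn): a tiny example of what
# check_symmetric does with a slightly asymmetric input. The 2x2 matrix is an
# arbitrary assumption for illustration.
def _example_check_symmetric():
    almost = np.array([[1.0, 2.0], [2.0 + 1e-3, 1.0]])    # asymmetric beyond the default tol
    return check_symmetric(almost, raise_warning=False)   # averaged with its transpose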
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
raise NotFittedError(msg % {'name': type(estimator).__name__})
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
zedoul/AnomalyDetection | test_discretization/test_spectralclustering3.py | 1 | 8781 | # -*- coding: utf-8 -*-
"""
http://www.astroml.org/sklearn_tutorial/dimensionality_reduction.html
"""
print (__doc__)
import numpy as np
import copy
import matplotlib
import matplotlib.mlab
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.cluster import KMeans
import pandas as pd
import nslkdd.preprocessing as preprocessing
import nslkdd.data.model as model
from nslkdd.get_kdd_dataframe import attack_types
from nslkdd.get_kdd_dataframe import df_by_attack_type
import sugarbee.reduction as reduction
import sugarbee.distance as distance
import sugarbee.affinity as affinity
import sugarbee.solver as solver
from autosp import predict_k
from sklearn.cluster import SpectralClustering
import colorhex
plot_lim_max = 30
plot_lim_min = -30
def test_clustering(df, gmms, title="", save_to_file=False, point=None):
df_train = copy.deepcopy(df)
true_values = df_train["attack"].values.tolist()
df_train.drop('attack',1,inplace=True)
df_train.drop('difficulty',1,inplace=True)
# print "reductioning..."
proj = reduction.gmm_reduction(df_train, headers, gmms)
cproj = copy.deepcopy(proj)
# print "plotting..."
data_per_true_labels = []
for i in range( len(attacks) ):
data_per_true_labels.append([])
true_attack_types = df["attack"].values.tolist()
for i, d in enumerate(cproj):
data_per_true_labels[true_attack_types[i]].append(d)
fig, axarr = plt.subplots(3, 4, sharex='col', sharey='row')
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.xlim(plot_lim_min, plot_lim_max)
plt.ylim(plot_lim_min, plot_lim_max)
ax1 = axarr[0, 0]
ax2 = axarr[0, 1]
ax3 = axarr[0, 2]
ax4 = axarr[0, 3]
ax5 = axarr[1, 0]
ax6 = axarr[1, 1]
ax7 = axarr[1, 2]
ax8 = axarr[1, 3]
ax9 = axarr[2, 0]
ax10 = axarr[2, 1]
ax11 = axarr[2, 2]
ax12 = axarr[2, 3]
ax1.set_title("True labels")
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
colors = []
if point == None :
if i == model.attack_normal:
colors.append('g')
else :
colors.append('r')
# for _ in range(len(x)):
# colors.append(colorhex.codes[i])
else :
for _ in range(len(x)):
if i == point :
colors.append(colorhex.codes[i])
elif i == model.attack_normal:
colors.append('g')
else :
colors.append('r')
ax1.scatter(x, y, c=colors)
##############################################################
ax2.set_title("True normal")
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i == model.attack_normal:
ax2.scatter(x, y, c='g')
##############################################################
ax3.set_title("True abnormal")
for i, p in enumerate(data_per_true_labels) :
x = [t[0] for t in p]
y = [t[1] for t in p]
x = np.array(x)
y = np.array(y)
if i != model.attack_normal:
ax3.scatter(x, y, c='r')
##############################################################
# A = affinity.get_affinity_matrix(proj, metric_method=distance.dist, metric_param='euclidean', knn=8)
A = affinity.get_affinity_matrix(proj, metric_method=distance.cosdist, knn=8)
# D = affinity.get_degree_matrix(A)
# L = affinity.get_laplacian_matrix(A,D)
# X = solver.solve(L)
# est = KMeans(n_clusters=k)
# est.fit(cproj)
# res = est.labels_
k = predict_k(A)
print "supposed k : " + str(k)
lim = int(len(df) * 0.1)
if k == 1 :
k = lim
if k > lim :
k = lim
print "Total number of clusters : " + str(k)
sc = SpectralClustering(n_clusters=k,
affinity="precomputed",
assign_labels="kmeans").fit(A)
res = sc.labels_
# print "The results : "
# print res
##############################################################
"""
what is this?
"""
true_attack_types = df["attack"].values.tolist()
clusters = [0] * k
for i, p in enumerate(cproj):
true_label = true_attack_types[i]
if true_label == model.attack_normal :
clusters[ res[i] ] = clusters[ res[i] ] + 1
else :
clusters[ res[i] ] = clusters[ res[i] ] - 1
##############################################################
ax4.set_title("k-means")
for i, p in enumerate(cproj):
ax4.scatter(p[0], p[1], c=colorhex.codes[ res[i] ])
##############################################################
ax5.set_title("Normal res")
for i, p in enumerate(cproj):
if clusters[ res[i]] >= 0 :
ax5.scatter(p[0], p[1], c='b')
##############################################################
ax6.set_title("Abnormal res")
for i, p in enumerate(cproj):
if clusters[ res[i] ] < 0 :
ax6.scatter(p[0], p[1], c='r')
##############################################################
print res
ax7.set_title("Cluster 1")
for i, p in enumerate(cproj):
if res[i] == 0 :
ax7.scatter(p[0], p[1], c='g')
##############################################################
ax8.set_title("Cluster 2")
for i, p in enumerate(cproj):
if res[i] == 1 :
ax8.scatter(p[0], p[1], c='g')
##############################################################
ax9.set_title("Cluster 3")
for i, p in enumerate(cproj):
if res[i] == 2 :
ax9.scatter(p[0], p[1], c='g')
##############################################################
ax10.set_title("Cluster 4")
for i, p in enumerate(cproj):
if res[i] == 3 :
ax10.scatter(p[0], p[1], c='g')
##############################################################
ax11.set_title("Cluster 5")
for i, p in enumerate(cproj):
if res[i] == 4 :
ax11.scatter(p[0], p[1], c='g')
##############################################################
ax12.set_title("Cluster 6")
for i, p in enumerate(cproj):
if res[i] == 5 :
ax12.scatter(p[0], p[1], c='g')
##############################################################
# confusion matrix
y_true = []
y_pred = []
for i in true_values :
if i == model.attack_normal :
y_true.append(0)
else :
y_true.append(1)
for i in res :
if clusters[i] >= 0 :
y_pred.append(0)
else :
y_pred.append(1)
from sklearn.metrics import confusion_matrix
m = confusion_matrix(list(y_true), list(y_pred))
s1 = m[0][0] + m[0][1]
s2 = m[1][1] + m[1][0]
print m
print "true_positive : " + str(m[0][0]) + " (" + str(m[0][0]*1.0 / s1) + ")"
print "true_negative : " + str(m[1][1]) + " (" + str(m[1][1]*1.0 / s2) + ")"
print "false_positive : " + str(m[1][0]) + " (" + str(m[1][0]*1.0 / s2) + ")"
print "false_negative : " + str(m[0][1]) + " (" + str(m[0][1]*1.0 / s1) + ")"
if save_to_file == True :
fig.savefig("./plots/results/"+title+".png")
else :
plt.show()
plt.close()
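##############################################################
# Editor's sketch (not part of the original experiment script): shows how the
# rates printed above could be computed from a confusion matrix. The label
# convention (0 = normal, 1 = attack) mirrors the one used in test_clustering;
# the variable names are the editor's, not the author's.
def _example_rates(y_true, y_pred):
    from sklearn.metrics import confusion_matrix
    m = confusion_matrix(list(y_true), list(y_pred))
    normal_total = float(m[0][0] + m[0][1])
    attack_total = float(m[1][0] + m[1][1])
    normal_accuracy = m[0][0] / normal_total     # normal traffic kept as normal
    detection_rate = m[1][1] / attack_total      # attacks flagged as abnormal
    false_alarm_rate = m[0][1] / normal_total    # normal traffic flagged as abnormal
    return normal_accuracy, detection_rate, false_alarm_rate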
if __name__ == '__main__':
import time
start = time.time()
headers, attacks = preprocessing.get_header_data()
headers.remove('protocol_type')
headers.remove('attack')
headers.remove('difficulty')
df_training_20, df_training_full, gmms_20, gmms_full = preprocessing.get_preprocessed_training_data()
df_test_20, df_test_full, gmms_test_20, gmms_test_full = preprocessing.get_preprocessed_test_data()
# with training-set
df1 = df_training_20[0:500]
gmms = gmms_20
title = "training20_only"
print "#################################################"
print title
test_clustering(df1, gmms, title=title, save_to_file=True)
# with test-set
dataset_description = "training20_test20"
for attack_type_index, attack_type in enumerate(model.attack_types) :
if attack_type_index <= model.attack_normal :
continue
df2 = df_by_attack_type(df_test_20, attack_type_index)
df2 = df2[0:50]
df = pd.concat([df1, df2])
title = dataset_description + "_" + attack_type
print "#################################################"
print title
print len(df1)
print len(df2)
test_clustering(df, gmms, title=title, save_to_file=True, point=attack_type_index)
elapsed = (time.time() - start)
print "done in %s seconds" % (elapsed)
| mit |
cactusbin/nyt | matplotlib/lib/mpl_toolkits/mplot3d/proj3d.py | 7 | 6832 | #!/usr/bin/python
# 3dproj.py
#
"""
Various transforms used for by the 3D code
"""
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
import numpy as np
import numpy.linalg as linalg
def line2d(p0, p1):
"""
Return 2D equation of line in the form ax+by+c = 0
"""
# x + x1 = 0
x0, y0 = p0[:2]
x1, y1 = p1[:2]
#
if x0 == x1:
a = -1
b = 0
c = x1
elif y0 == y1:
a = 0
b = 1
c = -y1
else:
        # General case, from the determinant form of the line through two points:
        # (y0-y1)*x + (x1-x0)*y + (x0*y1 - x1*y0) = 0
        a = (y0-y1)
        b = (x1-x0)
        c = (x0*y1 - x1*y0)
return a, b, c
def line2d_dist(l, p):
"""
Distance from line to point
line is a tuple of coefficients a,b,c
"""
a, b, c = l
x0, y0 = p
return abs((a*x0 + b*y0 + c)/np.sqrt(a**2+b**2))
def line2d_seg_dist(p1, p2, p0):
"""distance(s) from line defined by p1 - p2 to point(s) p0
p0[0] = x(s)
p0[1] = y(s)
intersection point p = p1 + u*(p2-p1)
and intersection point lies within segment if u is between 0 and 1
"""
x21 = p2[0] - p1[0]
y21 = p2[1] - p1[1]
x01 = np.asarray(p0[0]) - p1[0]
y01 = np.asarray(p0[1]) - p1[1]
u = (x01*x21 + y01*y21)/float(abs(x21**2 + y21**2))
u = np.clip(u, 0, 1)
d = np.sqrt((x01 - u*x21)**2 + (y01 - u*y21)**2)
return d
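# Illustrative example: line2d_seg_dist((0, 0), (10, 0), (5, 3)) returns 3.0, the
# perpendicular distance from the point (5, 3) to the segment joining (0, 0) and (10, 0).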
def test_lines_dists():
import pylab
ax = pylab.gca()
xs, ys = (0,30), (20,150)
pylab.plot(xs, ys)
points = zip(xs, ys)
p0, p1 = points
xs, ys = (0,0,20,30), (100,150,30,200)
pylab.scatter(xs, ys)
dist = line2d_seg_dist(p0, p1, (xs[0], ys[0]))
dist = line2d_seg_dist(p0, p1, np.array((xs, ys)))
for x, y, d in zip(xs, ys, dist):
c = Circle((x, y), d, fill=0)
ax.add_patch(c)
pylab.xlim(-200, 200)
pylab.ylim(-200, 200)
pylab.show()
def mod(v):
"""3d vector length"""
return np.sqrt(v[0]**2+v[1]**2+v[2]**2)
def world_transformation(xmin, xmax,
ymin, ymax,
zmin, zmax):
dx, dy, dz = (xmax-xmin), (ymax-ymin), (zmax-zmin)
return np.array([
[1.0/dx,0,0,-xmin/dx],
[0,1.0/dy,0,-ymin/dy],
[0,0,1.0/dz,-zmin/dz],
[0,0,0,1.0]])
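# The matrix returned by world_transformation() maps the axis-aligned box
# [xmin, xmax] x [ymin, ymax] x [zmin, zmax] onto the unit cube; e.g. the
# homogeneous point (xmin, ymin, zmin, 1) is sent to (0, 0, 0, 1).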
def test_world():
xmin, xmax = 100, 120
ymin, ymax = -100, 100
zmin, zmax = 0.1, 0.2
M = world_transformation(xmin, xmax, ymin, ymax, zmin, zmax)
print M
def view_transformation(E, R, V):
n = (E - R)
## new
# n /= mod(n)
# u = np.cross(V,n)
# u /= mod(u)
# v = np.cross(n,u)
# Mr = np.diag([1.]*4)
# Mt = np.diag([1.]*4)
# Mr[:3,:3] = u,v,n
# Mt[:3,-1] = -E
## end new
## old
n = n / mod(n)
u = np.cross(V, n)
u = u / mod(u)
v = np.cross(n, u)
Mr = [[u[0],u[1],u[2],0],
[v[0],v[1],v[2],0],
[n[0],n[1],n[2],0],
[0, 0, 0, 1],
]
#
Mt = [[1, 0, 0, -E[0]],
[0, 1, 0, -E[1]],
[0, 0, 1, -E[2]],
[0, 0, 0, 1]]
## end old
return np.dot(Mr, Mt)
def persp_transformation(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0,-1,0]
])
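# After the perspective divide performed in proj_transform_vec(), this matrix maps
# z = zfront to -1 and z = zback to +1, i.e. a normalized view volume in depth.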
def proj_transform_vec(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here..
txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
return txs, tys, tzs
def proj_transform_vec_clip(vec, M):
vecw = np.dot(M, vec)
w = vecw[3]
# clip here..
txs, tys, tzs = vecw[0]/w, vecw[1]/w, vecw[2]/w
tis = (vecw[0] >= 0) * (vecw[0] <= 1) * (vecw[1] >= 0) * (vecw[1] <= 1)
if np.sometrue(tis):
tis = vecw[1] < 1
return txs, tys, tzs, tis
def inv_transform(xs, ys, zs, M):
iM = linalg.inv(M)
vec = vec_pad_ones(xs, ys, zs)
vecr = np.dot(iM, vec)
try:
vecr = vecr/vecr[3]
except OverflowError:
pass
return vecr[0], vecr[1], vecr[2]
def vec_pad_ones(xs, ys, zs):
try:
try:
vec = np.array([xs,ys,zs,np.ones(xs.shape)])
except (AttributeError,TypeError):
vec = np.array([xs,ys,zs,np.ones((len(xs)))])
except TypeError:
vec = np.array([xs,ys,zs,1])
return vec
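# vec_pad_ones() appends a homogeneous coordinate: scalar inputs (x, y, z) become
# array([x, y, z, 1]), while equal-length arrays gain a matching row of ones.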
def proj_transform(xs, ys, zs, M):
"""
Transform the points by the projection matrix
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec(vec, M)
def proj_transform_clip(xs, ys, zs, M):
"""
Transform the points by the projection matrix
and return the clipping result
returns txs,tys,tzs,tis
"""
vec = vec_pad_ones(xs, ys, zs)
return proj_transform_vec_clip(vec, M)
transform = proj_transform
def proj_points(points, M):
return zip(*proj_trans_points(points, M))
def proj_trans_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform(xs, ys, zs, M)
def proj_trans_clip_points(points, M):
xs, ys, zs = zip(*points)
return proj_transform_clip(xs, ys, zs, M)
def test_proj_draw_axes(M, s=1):
import pylab
xs, ys, zs = [0, s, 0, 0], [0, 0, s, 0], [0, 0, 0, s]
txs, tys, tzs = proj_transform(xs, ys, zs, M)
o, ax, ay, az = (txs[0], tys[0]), (txs[1], tys[1]), \
(txs[2], tys[2]), (txs[3], tys[3])
lines = [(o, ax), (o, ay), (o, az)]
ax = pylab.gca()
linec = LineCollection(lines)
ax.add_collection(linec)
for x, y, t in zip(txs, tys, ['o', 'x', 'y', 'z']):
pylab.text(x, y, t)
def test_proj_make_M(E=None):
# eye point
    E = np.array([1, -1, 2]) * 1000 if E is None else E
#E = np.array([20,10,20])
R = np.array([1, 1, 1]) * 100
V = np.array([0, 0, 1])
viewM = view_transformation(E, R, V)
perspM = persp_transformation(100, -100)
M = np.dot(perspM, viewM)
return M
def test_proj():
import pylab
M = test_proj_make_M()
ts = ['%d' % i for i in [0,1,2,3,0,4,5,6,7,4]]
xs, ys, zs = [0,1,1,0,0, 0,1,1,0,0], [0,0,1,1,0, 0,0,1,1,0], \
[0,0,0,0,0, 1,1,1,1,1]
xs, ys, zs = [np.array(v)*300 for v in (xs, ys, zs)]
#
test_proj_draw_axes(M, s=400)
txs, tys, tzs = proj_transform(xs, ys, zs, M)
ixs, iys, izs = inv_transform(txs, tys, tzs, M)
pylab.scatter(txs, tys, c=tzs)
pylab.plot(txs, tys, c='r')
for x, y, t in zip(txs, tys, ts):
pylab.text(x, y, t)
pylab.xlim(-0.2, 0.2)
pylab.ylim(-0.2, 0.2)
pylab.show()
def rot_x(V, alpha):
cosa, sina = np.cos(alpha), np.sin(alpha)
M1 = np.array([[1,0,0,0],
[0,cosa,-sina,0],
[0,sina,cosa,0],
[0,0,0,0]])
return np.dot(M1, V)
def test_rot():
V = [1,0,0,1]
print rot_x(V, np.pi/6)
V = [0,1,0,1]
print rot_x(V, np.pi/6)
if __name__ == "__main__":
test_proj()
| unlicense |
bsautermeister/machine-learning-examples | dnn_classification/tf_learn/custom_dnn_abalone.py | 1 | 8357 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import argparse
import tempfile
from six.moves import urllib
import numpy as np
import tensorflow as tf
import sklearn.metrics
tf.logging.set_verbosity(tf.logging.INFO)
def maybe_download(train_data, test_data, predict_data):
"""Maybe downloads training data and returns train and test file names."""
def _download(url):
tmp_file = tempfile.NamedTemporaryFile(delete=False)
urllib.request.urlretrieve(url, tmp_file.name)
file_name = tmp_file.name
tmp_file.close()
return file_name
if train_data:
train_file_name = train_data
else:
train_file_name = _download("http://download.tensorflow.org/data/abalone_train.csv")
print("Training data is downloaded to %s" % train_file_name)
if test_data:
test_file_name = test_data
else:
test_file_name = _download("http://download.tensorflow.org/data/abalone_test.csv")
print("Test data is downloaded to %s" % test_file_name)
if predict_data:
predict_file_name = predict_data
else:
predict_file_name = _download("http://download.tensorflow.org/data/abalone_predict.csv")
print("Prediction data is downloaded to %s" % predict_file_name)
return train_file_name, test_file_name, predict_file_name
class CustomModel:
LEARNING_RATE_KEY = "learning_rate"
DROPOUT_KEY = "dropout"
def __init__(self, learning_rate, dropout):
self.learning_rate = learning_rate
self.dropout = dropout
def params(self):
return {
self.LEARNING_RATE_KEY: self.learning_rate,
self.DROPOUT_KEY: self.dropout
}
def build(self):
def _build(features, labels, mode, params):
# 1. Configure the model via TensorFlow operations
# input_layer = tf.contrib.layers.input_from_feature_columns(
# columns_to_tensors=features, feature_columns=[age, height, weight])
y = tf.contrib.layers.fully_connected(features, num_outputs=10, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
y = tf.contrib.layers.fully_connected(y, num_outputs=10, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
y = tf.contrib.layers.fully_connected(y, num_outputs=10, activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer())
keep_prob = params[self.DROPOUT_KEY] if mode == tf.estimator.ModeKeys.TRAIN else 1.0
y = tf.nn.dropout(y, keep_prob=keep_prob)
y = tf.contrib.layers.fully_connected(y, num_outputs=1, activation_fn=tf.nn.sigmoid,
weights_initializer=tf.contrib.layers.xavier_initializer())
# Reshape output layer to 1-dim Tensor to return predictions
predictions = tf.reshape(y, [-1])
# 2. Define the loss function for training/evaluation
loss = None
eval_metric_ops = None
if mode != tf.estimator.ModeKeys.PREDICT:
loss = tf.losses.mean_squared_error(labels, predictions)
reshaped_labels = tf.reshape(labels, [-1])
reshaped_labels = tf.cast(reshaped_labels, tf.float32)
reshaped_preds = tf.reshape(predictions, [-1])
reshaped_preds = tf.round(reshaped_preds)
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(
tf.cast(labels, tf.float32), predictions),
"accuracy": tf.metrics.accuracy(reshaped_labels, reshaped_preds),
"precision": tf.metrics.precision(reshaped_labels, reshaped_preds)
}
# 3. Define the training operation/optimizer
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=params[self.LEARNING_RATE_KEY],
optimizer='SGD')
# 4. Generate predictions
predictions_dict = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions_dict = {'ages': predictions}
# 5. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
return tf.estimator.EstimatorSpec(mode, predictions_dict, loss, train_op, eval_metric_ops)
return _build
def batched_input_fn(dataset_x, dataset_y, batch_size, num_epochs=None, shuffle=True):
def _input_fn():
all_x = tf.constant(dataset_x, shape=dataset_x.shape, dtype=tf.float32)
datasets = [all_x]
if dataset_y is not None:
all_y = tf.constant(dataset_y, shape=dataset_y.shape, dtype=tf.float32)
datasets.append(all_y)
sliced_input = tf.train.slice_input_producer(datasets, num_epochs=num_epochs, shuffle=shuffle)
return tf.train.batch(sliced_input, batch_size=batch_size, num_threads=4)
return _input_fn
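# batched_input_fn() returns a closure suitable for the Estimator's
# train/evaluate/predict input_fn argument, e.g. (as used in main() below):
#   nn.train(input_fn=batched_input_fn(training_set.data, training_set.target,
#                                      batch_size=FLAGS.batch_size), steps=FLAGS.train_steps)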
def main(_):
# Load datasets
abalone_train, abalone_test, abalone_predict = maybe_download(
FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data)
# Training examples
training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_train, target_dtype=np.int, features_dtype=np.float32)
# Test examples
test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_test, target_dtype=np.int, features_dtype=np.float32)
# Set of 7 examples for which to predict abalone_snail ages
prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header(
filename=abalone_predict, target_dtype=np.int, features_dtype=np.float32)
m = CustomModel(FLAGS.learning_rate, FLAGS.keep_prob)
nn = tf.estimator.Estimator(model_fn=m.build(), params=m.params())
nn.train(input_fn=batched_input_fn(training_set.data, training_set.target,
batch_size=FLAGS.batch_size),
steps=FLAGS.train_steps)
ev = nn.evaluate(input_fn=batched_input_fn(test_set.data, test_set.target,
batch_size=FLAGS.batch_size, num_epochs=1))
print("Loss: {}".format(ev["loss"]))
print("RMSE: {}".format(ev["rmse"]))
# Print out predictions
pred_generator = nn.predict(input_fn=batched_input_fn(prediction_set.data, None,
batch_size=FLAGS.batch_size, num_epochs=1, shuffle=False))
pred_list = [p['ages'] for p in pred_generator]
pred_array = np.asarray(pred_list)
pred_array = np.round(pred_array)
cm = sklearn.metrics.confusion_matrix(y_true=prediction_set.target[:pred_array.shape[0]],
y_pred=pred_array)
print(cm)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument("--train_data", type=str, default="",
help="Path to the training data.")
parser.add_argument("--test_data", type=str, default="",
help="Path to the test data.")
parser.add_argument("--predict_data", type=str, default="",
help="Path to the prediction data.")
parser.add_argument("--learning_rate", type=float, default=0.001,
help="The initial learning rate.")
parser.add_argument("--train_steps", type=int, default=1000,
help="The number of training steps.")
parser.add_argument("--batch_size", type=int, default=64,
help="The batch size.")
parser.add_argument("--keep_prob", type=float, default=0.5,
help="The keep probability of the dropout layer.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| mit |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/linear_model/setup.py | 83 | 1719 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('linear_model', parent_package, top_path)
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
config.add_extension('cd_fast', sources=['cd_fast.pyx'],
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]), **blas_info)
config.add_extension('sgd_fast',
sources=['sgd_fast.pyx'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
libraries=cblas_libs,
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
**blas_info)
config.add_extension('sag_fast',
sources=['sag_fast.pyx'],
include_dirs=numpy.get_include())
# add other directories
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| mit |
bakfu/benchmark | bench/tools.py | 1 | 2989 | import os
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import nltk
from bakfu.core.routes import register
from bakfu.core.classes import Processor
import logging
log = logger = logging.getLogger('bench')
result_logger = logging.getLogger('bench_results')
@register('bench.ml1')
class BenchProcess(Processor):
'''
Run a random forest and test quality.
'''
init_args = ()
init_kwargs = ('n_estimators',)
run_args = ()
run_kwargs = ()
    # NOTE: this first run() stub referenced the undefined names `bakfu` and `lang` and
    # is shadowed by the run(self, caller, ...) implementation defined below; it is kept
    # here only as a comment.
    # def run(self, baf, *args, **kwargs):
    #     print('BENCH....')
    #     baf = self.chain = bakfu.Chain(lang=lang)
def __init__(self, *args, **kwargs):
super(BenchProcess, self).__init__(*args, **kwargs)
self.n_estimators = kwargs.get('n_estimators', 50)
def run(self, caller, *args, **kwargs):
super(BenchProcess, self).run(caller, *args, **kwargs)
baf = caller
data_source = caller.get_chain('data_source')
language = baf.get('language')
stop_words = nltk.corpus.stopwords.words(baf.get('language'))
if language == 'french':
stop_words.extend([u'les',u'la|le',u'\xeatre',u'@card@',u'avoir',u'@ord@',u'aucun',u'oui',u'non',
u'aucune',u'jamais',u'voir',u'n/a',u'ras',u'nil',u'nous',
u'chez','nous','quand',u'',u'',u'Les',u'[i]',u'si',u'\xe0','('])
data_source.get_data()
labels = baf.get_chain('targets')
answers = data_source.get_data()
#classifier = RandomForestClassifier(n_estimators=self.n_estimators)
classifier = baf.get_chain("classifier")
X=baf.data['vectorizer_result']
score=[0,0]
NUM_RUNS = 50
SAMPLE_SIZE = 50
if os.environ.get('BENCH_FAST','0')=='1':
#Fast mode...
NUM_RUNS = 2
SAMPLE_SIZE = 5
if len(answers)<SAMPLE_SIZE:
SAMPLE_SIZE = len(answers)
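        # Leave-one-out style evaluation: for each of the first SAMPLE_SIZE answers,
        # refit the classifier on all other rows and score the held-out prediction,
        # repeated NUM_RUNS times.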
for run in range(NUM_RUNS):
print("run {}".format(run))
for i in range(SAMPLE_SIZE):
#for i in range(X.shape[0]):
print(i)
#X2 = np.array([X[i].toarray()[0] for j in range(X.shape[0])] if j!=i)
#labels2 = [labels[j] for j in range(X.shape[0])] if j!=i]
X2 = np.array([X[j].toarray()[0] for j in range(X.shape[0]) if i!=j])
labels2 = [labels[j] for j in range(X.shape[0]) if j!=i]
classifier.fit(X2,np.array(labels2))
pred = classifier.predict([X.toarray()[i],])[0]
if pred==labels[i]:
score[0]+=1
else:
score[1]+=1
result_logger.info(score)
R=score[0]/float(sum(score))
result_logger.info("Score : good : \t {} ; \t bad : \t : {} \t ratio {}" .format(score[0],score[1],R))
self._data['score'] = (score[0],score[1],R)
return self
| bsd-3-clause |
depet/scikit-learn | sklearn/tests/test_base.py | 9 | 5815 |
# Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
"""Tests that clone creates a correct deep copy.
We create an estimator, make a copy of its original state
(which, in this case, is the current state of the estimator),
and check that the obtained copy is a correct deep copy.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
"""Tests that clone doesn't copy everything.
We first create an estimator, give it an own attribute, and
make a copy of its original state. Then we check that the copy doesn't
have the specific attribute we manually added to the initial estimator.
"""
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
"""Check that clone raises an error on buggy estimators."""
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
"""Regression test for cloning estimators with empty arrays"""
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_repr():
"""Smoke test the repr of the base estimator."""
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
"""Smoke test the str of the base estimator"""
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
#bad_pipeline = Pipeline([("bad", NoEstimator())])
#assert_raises(AttributeError, bad_pipeline.set_params,
#bad__stupid_param=True)
| bsd-3-clause |
agartland/pysieve | distance.py | 1 | 31210 | """
distance.py
Distance functions to be called by the analysis functions.
TODO:
- Standardize inputs to distance functions to make the architecture more "plug-n-play"
- Release a version of seqtools with the required functions to satisfy dependencies
Generally a distance function should have the following inputs:
- Sieve data object
- Parameters
(Q: I use a dict of params to simplify the arguments, but is it confusing that each distance function requires different params?)
and should return either:
- A 2D [nPTIDs x nSites] pd.DataFrame
- A 3D [nPTIDs x nSites x nParams] numpy.ndarray for multiple parameter values
NOTES:
Currently it looks as though with low nPerms and high simulation repeats prepareBA and binding distance computations
are the speed bottleneck. I could probably speed up the distance computation by using the 4D MATLAB-style BA matrix and numexpr
since this would vectorize the <, >, and sum operations."""
from seqdistance.matrices import binarySubst, addGapScores
from seqdistance import hamming_distance
import numpy as np
import pandas as pd
import itertools
from HLAPredCache import *
from decimal import Decimal
__all__=['_vxmatch_distance',
'_binding_escape_distance',
'_nepitope_distance',
'_indel_escape_distance',
'_prepareSeqBA',
'_prepareAlignBA',
'_prepareBTBA',
'_prepareBA',
'_similarity_score',
'_findall',
'_binding_scan_distance',
'_indel_scan_distance',
'_epitope_mismatch_distance',
'_relative_binding_escape_distance']
def _vxmatch_distance(insertSeq, seqDf, params):
"""Computes a vaccine insert distance for each breakthrough sequence
based on the specified "subst" similarity matrix.
Used by the global and local vxmatch sieve analyses.
NOTE:
Normalization is performed using a mean similarity across the sequence, but applied site-wise.
    This is important for stability in the normalization, but may underestimate the contribution of
    rare amino acids when non-binary and non-normalized similarity matrices are used (e.g. HIV PAM).
Parameters
----------
insertSeq : str
Amino acid sequence of the vaccine insert/immunogen
seqDf : pd.DataFrame
Contains the breakthrough sequences (see pysieve.data for details)
params : dict
Should contain a similarity matrix 'subst', a dict of amino acid similarity scores.
Returns
-------
dist : pd.DataFrame
Distance matrix [ptids x sites] with PTID row-index and sites (0-indexed) as columns."""
try:
subst = params['subst']
except KeyError:
print 'Using default binary substitution matrix.'
subst = addGapScores(binarySubst, None)
N = seqDf.shape[0]
nSites = len(insertSeq)
sim = np.ones((N, nSites))
"""Compute insert2insert similarity for normalization.
Similarity of insert to itself is high (exactly 1 for a hamming distance)"""
insert2insert = np.nanmean(_similarity_score(insertSeq, insertSeq, subst = subst))
for ptidi,ptid in enumerate(seqDf.index):
"""Similarity of a sequence to itself is also high and will be used in the normalization"""
seq2seq = np.nanmean(_similarity_score(seqDf.seq[ptid], seqDf.seq[ptid], subst = subst))
sim[ptidi,:] = _similarity_score(seqDf.seq[ptid], insertSeq, subst = subst, denominator = seq2seq + insert2insert)
"""similarity --> distance only works with correct normalization"""
dist = 1 - sim
return pd.DataFrame(dist, index = seqDf.index, columns = np.arange(nSites))
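# A minimal illustrative call (objects are placeholders for the sieve data fields):
#   dist = _vxmatch_distance(insertSeq, seqDf, {'subst': addGapScores(binarySubst, None)})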
def _nepitope_distance(insertBA, btBA, params):
"""Creates a distance matrix (DataFrame) [N x sites]
indicating the number of PTIDs predicted to have an insert epitope for each kmer
Parameters
----------
insertBA : pd.DataFrame
Row index as HLAs and colums as sites, shape [len(uHLA4) x nSites]
btBA : dict
Dict contains keys for (1) "validHLAs" HLAs used, (2) "ba" array([nHLAs (4 typically), nSites]) and (3) "ptid"
params : dict
Should contain 'binding' and 'nmer' parameters.
Returns
-------
dist : pd.DataFrame
Distance matrix [ptids x sites] with PTID row-index and sites (kmer start positions 0-indexed) as columns."""
N = len(btBA['ptid'])
nSites = insertBA.shape[1]
dist = np.nan * np.ones((N,nSites))
for ptidi,ptid,ba,h in zip(np.arange(N), btBA['ptid'], btBA['ba'], btBA['validHLAs']):
tmpInsert = insertBA.loc[h]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(h, return_index = True)
"""Note that the original binding escape distance sums across HLAs not any's """
#dist[ptidi,:]=np.squeeze(np.any((tmpInsert<params['binding']),axis=0))
dist[ptidi,:] = np.squeeze(np.sum((tmpInsert < params['binding']).values[uniqi,:], axis = 0))
return pd.DataFrame(dist, index = btBA['ptid'], columns = np.arange(nSites))
def _indel_escape_distance(insertSeq, insertBA, seqDf, btBA, params):
"""Creates a distance matrix (DataFrame) [N x sites] with PTID rows and sites as columns
populated with the HLA binding escape count distance
Parameters
----------
insertSeq : str
Amino acid sequence of the vaccine insert/immunogen
insertBA : pd.DataFrame
Row index as HLAs and colums as sites, shape [len(uHLA4) x nSites]
seqDf : pd.DataFrame
Contains the breakthrough sequences (see pysieve.data for details)
btBA : dict
Dict contains keys for (1) "validHLAs" HLAs used, (2) "ba" array([nHLAs (4 typically), nSites]) and (3) "ptid"
params : dict
Should contain 'binding' and 'nmer' parameters.
Returns
-------
dist : pd.DataFrame
Distance matrix [ptids x sites] with PTID row-index and sites (kmer start positions 0-indexed) as columns."""
N = seqDf.shape[0]
nSites = insertBA.shape[1]
dist = np.nan * np.ones((N, nSites))
for ptidi,ptid in enumerate(seqDf.index):
"""An indel is 'shared' if its found in both the insert and breakthrough kmer"""
unsharedIndel = np.zeros((len(btBA['validHLAs'][ptidi]), nSites), dtype = bool)
for sitei in range(nSites):
"""grabKmer returns None if the kmer is invalid (e.g. off the end of the sequence)"""
insertMer,_nonGapped = grabKmer(insertSeq, sitei, params['nmer'])
btMer,_nonGapped = grabKmer(seqDf.seq[ptid], sitei, params['nmer'])
"""If the insert and the bt mer don't have gaps in the same places then there is an indel!"""
if not insertMer is None:
if btMer is None or not np.all(np.array(_findall(insertMer,'-')) == np.array(_findall(btMer,'-'))):
unsharedIndel[:,sitei] = True
tmpInsert = insertBA.loc[btBA['validHLAs'][ptidi]]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(btBA['validHLAs'][ptidi], return_index=True)
"""ANY HLA: Escape count is 1 if the kmer binds and there is an indel in the BT seq"""
#dist[ptidi,:] = np.squeeze(np.any((tmpInsert < params['binding']) & unsharedIndel, axis=0))
"""SUM HLAs: count 1 escape per HLA that binds (not including homozygous alleles"""
dist[ptidi,:] = np.squeeze(np.sum(((tmpInsert < params['binding']).values & unsharedIndel)[uniqi,:],axis=0))
return pd.DataFrame(dist, index = btBA['ptid'], columns = np.arange(nSites))
def _binding_escape_distance(insertSeq, insertBA, seqDf, btBA, params):
"""Creates a distance matrix (pd.DataFrame) [ptids x sites]
populated with the HLA binding escape count distance.
TODO:
- Handle 15mer 'unique core' distances
- Standardize the input arguments to match other distances
(or at least other T cell based distances)
- Make the handling of homozygous alleles and multiple escapes per kmer
a parameter.
Parameters
----------
insertSeq : str
Amino acid sequence of the vaccine insert/immunogen
insertBA : pd.DataFrame
Row index as HLAs and colums as sites, shape [len(uHLA4) x nSites]
seqDf : pd.DataFrame
Contains the breakthrough sequences (see pysieve.data for details)
btBA : dict
Dict contains keys for (1) "validHLAs" HLAs used, (2) "ba" array([nHLAs (4 typically), nSites]) and (3) "ptid"
params : dict
Should contain 'binding', 'escape' and 'nmer' parameters.
Returns
-------
dist : pd.DataFrame
Distance matrix [ptids x sites] with PTID row-index and sites (kmer start positions 0-indexed) as columns."""
N = len(btBA['ptid'])
nSites = insertBA.shape[1]
"""Don't count a binding escape if there's also an insertion/deletion there
(these distances should be mutually exclusive)
    It is important to make indelDist 0s and 1s for this purpose"""
indelDist = (_indel_escape_distance(insertSeq, insertBA, seqDf, btBA, params).values > 0).astype(int)
dist = np.nan * np.ones((N,nSites))
for ptidi,ptid,ba,h in zip(np.arange(N), btBA['ptid'], btBA['ba'], btBA['validHLAs']):
"""Maxtrix of binding affinities for the hla alleles in h"""
tmpInsert = insertBA.loc[h]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(h, return_index=True)
"""ANY HLA: For each HLA (typically 4 per PTID), if it meets the criteria for this kmer then its an escape"""
#dist[ptidi,:] = np.squeeze(np.any((tmpInsert<params['binding']) & (ba>params['escape']),axis=0)) * (1-indelDist[ptidi,:])
"""SUM HLAS: Count multiple escapes per kmer if the person has multiple alleles with escape"""
dist[ptidi,:] = np.squeeze(np.sum(((tmpInsert < params['binding']).values & (ba > params['escape']))[uniqi,:], axis=0)) * (1 - indelDist[ptidi,:])
return pd.DataFrame(dist, index=btBA['ptid'], columns=np.arange(nSites))
def _prepareSeqBA(seq, hlas, ba, k, ignoreGappedKmers=False, getFast=False):
"""Prepare a matrix of binding affinities for all kmers in seq and all hlas.
Parameters
----------
seqs : collection
Aligned sequences/strings
hlas : collection
HLA alleles
ba : dict/HLAPredCache
        Contains all necessary predicted binding affinities for populating the matrix.
Keys are tuples (allele, peptide)
k : int
Length of the peptides.
ignoreGappedKmers : bool
If False then kmer continues for full k AA,
but if True then throws out all kmers with a gap at any location in any bt sequence.
getFast : bool
If True, uses the getFast method of hlaPredCache, w/o error checking
Returns
-------
baMat : pd.DataFrame, [nHLAs x nSites]
Matrix of binding affinities with rows HLAs and columns as 0-indexed kmer start positions."""
nSites = len(seq)
baMat = np.nan * np.ones((len(hlas),nSites))
if getFast:
baFunc = lambda t: ba.getFast(t)
"""Replace all '*' here just in case, if trying to use getFast"""
originalHLAs = hlas
hlas = [h.replace('*','_') for h in hlas]
else:
originalHLAs = hlas
baFunc = lambda t: ba[t]
for sitei in range(nSites):
"""ngmer is None if the insert kmer starts with a gap '-', leave these as nan"""
gmer,ngmer = grabKmer(seq,sitei,k)
if not ignoreGappedKmers:
"""Use ngmer which starts at sitei and grabs the next nmer AAs (not counting gaps)"""
mer = ngmer
else:
mer = gmer
if (not mer is None) and (not '-' in mer):
for hlai,hla in enumerate(hlas):
if isvalidHLA(hla):
baMat[hlai,sitei] = baFunc((hla,mer))
return pd.DataFrame(baMat, index = originalHLAs, columns = np.arange(nSites))
def _prepareAlignBA(seqs, hlas, ba, k, ignoreGappedKmers = False, getFast = False):
"""Prepare a matrix of binding affinities for all kmers, all HLAs and all seqs.
Parameters
----------
seqs : collection
Aligned sequences/strings
hlas : collection
HLA alleles
ba : dict/HLAPredCache
        Contains all necessary predicted binding affinities for populating the matrix.
Keys are tuples (allele, peptide)
k : int
Length of the peptides.
ignoreGappedKmers : bool
If False then kmer continues for full k AA,
but if True then throws out all kmers with a gap at any location in any bt sequence.
getFast : bool
If True, uses the getFast method of hlaPredCache, w/o error checking
Returns
-------
baMat : ndarray [nSeqs x nHLAs x nSites]
Matrix of binding affinities"""
nSites = int(np.median([len(s) for s in seqs]))
baMat = np.nan * np.ones((len(seqs), len(hlas), nSites))
"""Go through each person, creating a bt BA [len(hla) x nSites] and assign to the big matrix"""
for seqi,seq in enumerate(seqs):
baMat[seqi,:,:] = _prepareSeqBA(seq, hlas, ba, k, ignoreGappedKmers, getFast).values
"""Ignore any kmer that had a gap in any BT sequence"""
if ignoreGappedKmers:
"""If the BA is nan for all HLAs then the kmer must have had a gap.
If aross people, any kmer had a gap then set all people nan there"""
badSites = np.any(np.all(np.isnan(baMat), axis=1), axis=0)
"""TODO: this could be simplified using numpy broadcasting"""
baMat[np.tile(badSites[None,None,:], (baMat.shape[0], baMat.shape[1],1))] = np.nan
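        # (the broadcasting equivalent of the line above would be:
        #  baMat[:, :, badSites] = np.nan)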
return baMat
def _prepareBTBA(data, ba, params):
"""Prepare matrix of log-IC50 binding affinities of BREAKTHROUGH sequences given the dict/HLAPredCache ba
    Only the BA for alleles expressed by each subject is returned.
    Q: What calls this function and why?
Parameters
----------
data : pysieve.sieveData object
ba : dict/hlaPredCache
params : dict
Required keys: nmer, ignoreGappedKmers, getFast
Returns
-------
btBA : dict of lists with rows per ptid
Dict keys for (1) "validHLAs" HLAs used,
(2) "ba" array([nHLAs (4 typically), nSites])
(3) "ptid"
"""
nSites = len(data.insertSeq)
fullBTBA = _prepareAlignBA(data.seqDf.seq,
data.uHLA4,
ba,
params['nmer'],
ignoreGappedKmers = params['ignoreGappedKmers'],
getFast = params['getFast'])
"""Go through each person, creating a bt BA [len(hla) x nSites]"""
btBA = {'ptid':[],'validHLAs':[],'ba':[]}
for ptid,row in data.seqDf.iterrows():
ptidi = list(data.seqDf.index).index(ptid)
btSeq = row['seq']
HLAs = data.ptidDf.hla[ptid]
validHLAs = []
for hla in HLAs:
if isvalidHLA(hla):
validHLAs.append(hla)
"""New way using the full BTBA"""
tmpba = np.nan * np.ones((len(validHLAs),nSites))
for i,h in enumerate(validHLAs):
hlai = list(data.uHLA4).index(h)
tmpba[i,:]=fullBTBA[ptidi,hlai,:]
btBA['ptid'].append(ptid)
btBA['validHLAs'].append(validHLAs)
btBA['ba'].append(tmpba)
return btBA
def _prepareBA(data, ba, params):
"""Prepare matrices of log-IC50 HLA binding affinities for insert and breakthrough sequences
Used by several T-cell based sieve distances.
TODO:
Currently this function returns two different btBA data objects depending on fullBT
This is probably not a good idea.
Parameters
----------
data : pysieve.sieveData object
ba : dict/hlaPredCache
params : dict
Required keys: nmer, fullBT, ignoreGappedKmers, getFast
Returns
-------
insertBA : pd.DataFrame, shape [len(uHLA4) x nSites]
Row index as HLAs and colums as start positions
btBA : variable
Dict of lists with rows per ptid, dict keys for
(1) "validHLAs" HLAs used,
(2) "ba" array([nHLAs (4 typically), nSites])
(3) "ptid"
OR if fullBT is True, a 3D ndarray [nSeqs x nHLAs x nSites]
method : str
Describing the prediction method used (from ba)"""
fullBT = params.get('fullBT',False)
getFast = params.get('getFast',False)
ignoreGappedKmers = params.get('ignoreGappedKmers',False)
"""Create matrix of insert BA [len(uHLA4) x nSites]"""
insertBA = _prepareSeqBA(data.insertSeq,
data.uHLA4,
ba,
params['nmer'],
ignoreGappedKmers = params['ignoreGappedKmers'],
getFast = params['getFast'])
if not fullBT:
btBA = _prepareBTBA(data,ba,params)
else:
"""New structure required by _relative_binding_escape distance"""
btBA = _prepareAlignBA(data.seqDf.seq,
data.uHLA4,
ba,
params['nmer'],
ignoreGappedKmers = params['ignoreGappedKmers'],
getFast = params['getFast'])
return insertBA, btBA, ba.predictionMethod
def _similarity_score(seq1, seq2, subst, denominator = 2):
"""Return a vector of site-wise similarities between two sequences based on a substitution matrix (dict).
Optionally can give a denominator for normalization.
Example denominator: sim11 + sim22 which is the sum of seq1 to itself and seq2 to itself.
Denominator can be supplied as a vector, in which case the normalization is done site-wise or
as a scalar in which case it is equivalent to applying the normalization for the whole sequence
(even though this function will still give the distance site-wise)
By default there is no normalization (denominator = 2).
This can create a problem for vxmatch using similarity matrices other than binarySubst"""
"""Similarity between seq1 and seq2 using the substitution matrix subst"""
sim12 = np.array([i for i in itertools.imap(lambda a,b: subst.get((a,b), subst.get((b,a))), seq1, seq2)])
return (2 * sim12) / denominator
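# For example, with a binary substitution matrix (match = 1, mismatch = 0) and the
# default denominator of 2, identical sequences score 1.0 at every site.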
def _findall(s, item):
"""Return index of each element equal to item in s"""
return [i for i,x in enumerate(s) if x == item]
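# e.g. _findall('A-C-', '-') returns [1, 3]; used here to locate gap positions in kmers.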
def _indel_scan_distance(insertSeq, insertBA, seqDf, btBA, nmer, paramPairs, minDelta):
"""Creates a distance matrix ndarray [N x sites x params]
populated with the indel escape count distance"""
N = seqDf.shape[0]
nSites = insertBA.shape[1]
nParams = len(paramPairs)
minDelta = Decimal('%1.1f' % minDelta)
dist = np.nan*np.ones((N,nSites,nParams))
for ptidi,ptid in enumerate(seqDf.index):
unsharedIndel = np.zeros((len(btBA['validHLAs'][ptidi]),nSites), dtype=bool)
"""Determine if there are any 'unshared' gaps in each kmer"""
for sitei in xrange(nSites):
insertMer,_nonGapped = grabKmer(insertSeq,sitei,nmer)
btMer,_nonGapped = grabKmer(seqDf.seq[ptid],sitei,nmer)
"""If the insert and the bt mer don't have gaps in the same places then there is an indel!"""
if not insertMer is None:
if btMer is None or not np.all(np.array(_findall(insertMer,'-')) == np.array(_findall(btMer,'-'))):
unsharedIndel[:,sitei] = True
tmpInsert = insertBA.loc[btBA['validHLAs'][ptidi]]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(btBA['validHLAs'][ptidi], return_index=True)
"""For each parameter pair compute the distance"""
for parami,pp in enumerate(paramPairs):
"""A pair of binding and escape thresholds are only valid if escape - binding > delta"""
if (pp[1]-pp[0]) > minDelta:
"""If any of the HLAs (typically 4 per PTID) meets the criteria for this kmer then its an escape"""
#dist[ptidi,:,parami]=np.squeeze(np.any((tmpInsert < pp[0]) & unsharedIndel,axis=0))
"""The original binding escape distance, for EACH of the HLAs that meets the criteria...its an escape"""
dist[ptidi,:,parami] = np.squeeze(np.sum(((tmpInsert<np.float64(pp[0])).values & unsharedIndel)[uniqi,:],axis=0))
return dist
def _binding_scan_distance(insertSeq, insertBA, seqDf, btBA, paramPairs, minDelta, nmer):
"""Creates a distance matrix ndarray [N x sites x params]
populated with the HLA binding escape count distance"""
N = len(btBA['ptid'])
nSites = insertBA.shape[1]
nParams = len(paramPairs)
minDelta = Decimal('%1.1f' % minDelta)
"""Don't count a binding escape if there's also an indel there (these distances should be mutually exclusive)
    It is important to dichotomize this distance when it is used here though"""
#indelDist = (_indel_scan_distance(insertSeq, insertBA, seqDf, btBA, nmer, paramPairs, minDelta) > 0).astype(np.int64)
"""Nan is default value!
This means that the following will be nan in dist:
non-existent insert kmers (ie start with '-') (ACTUALLY THESE SHOULD BE ZEROS)
invalid parameters (insufficient delta)
filteredDist will have nans at kmers that have been filtered out
it doesn't appear that there are other nans
This is important because it means in the compstats for binding escape i could set nan to 0
"""
dist = np.nan * np.ones((N, nSites, nParams))
dummydist = np.nan * np.ones((N, nSites, nParams))
for ptidi,ptid,ba,h in zip(np.arange(N), btBA['ptid'], btBA['ba'], btBA['validHLAs']):
tmpInsert = insertBA.loc[h]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(h, return_index=True)
for parami,pp in enumerate(paramPairs):
"""A pair of binding and escape thresholds are only valid if escape - binding > delta"""
if (pp[1]-pp[0]) > minDelta:
tmp=(tmpInsert < np.float64(pp[0])).values & (ba > np.float64(pp[1]))
"""Sum across HLAs"""
dist[ptidi,:,parami] = np.sum(tmp[uniqi,:], axis=0) #* (1-indelDist[ptidi,:,parami])
dummydist[ptidi,:,parami] = 1
return dist
def _epitope_mismatch_distance(seqDf, insertSeq, insertDf, insertBA, btBA, params):
"""Creates a distance matrix (DataFrame) [N x sites]
indicating the PTIDs with an insert epitope and a
breakthrough with greater than mmTolerance substitutions relative to the reference.
Parameters
----------
seqDf : pd.DataFrame
PTID as index with seq column containing breakthrough sequences
insertSeq : str
Amino acid sequence of the insert
insertBA : pd.DataFrame
Row index as HLAs and colums as sites, shape [len(uHLA4) x nSites]
btBA : dict
Dict contains keys for (1) "validHLAs" HLAs used, (2) "ba" array([nHLAs (4 typically), nSites]) and (3) "ptid"
params : dict
Should contain binding, nmer, ignoreGappedKmers, and mmTolerance parameters.
Returns
-------
dist : pd.DataFrame
Distance matrix [ptids x sites] with PTID row-index and sites (kmer start positions 0-indexed) as columns."""
N = len(btBA['ptid'])
nSites = insertBA.shape[1]
mmCount = np.zeros((N, nSites))
for sitei in range(nSites):
"""ngmer is None if the insert kmer starts with a gap '-', leave these as nan"""
igmer,ingmer = grabKmer(insertSeq, sitei, params['nmer'])
if not params['ignoreGappedKmers']:
"""Use ngmer which starts at sitei and grabs the next nmer AAs (not counting gaps)"""
imer = ingmer
else:
imer = igmer
for ptidi,ptid in zip(np.arange(N), btBA['ptid']):
btgmer,btngmer = grabKmer(seqDf.seq.loc[ptid], sitei, params['nmer'])
if not params['ignoreGappedKmers']:
"""Use ngmer which starts at sitei and grabs the next nmer AAs (not counting gaps)"""
btmer = btngmer
else:
btmer = btgmer
mmCount[ptidi,sitei] = hamming_distance(imer, btmer, asStrings=True)
dist = np.nan * np.ones((N, nSites))
for ptidi,ptid,ba,h in zip(np.arange(N), btBA['ptid'], btBA['ba'], btBA['validHLAs']):
tmpInsert = insertBA.loc[h]
"""Do not double count escapes from homozygous alleles"""
dummy,uniqi = np.unique(h, return_index=True)
"""ANY HLA allele binds AND mmCount is greater than mmTolerance"""
dist[ptidi,:] = np.squeeze(np.any((tmpInsert < params['binding']).values[uniqi,:], axis=0))
dist[ptidi,:] = dist[ptidi,:] & (mmCount[ptidi,:] > params['mmTolerance'])
return pd.DataFrame(dist.astype(np.float64), index=btBA['ptid'], columns=np.arange(nSites))
def _identifyNoPressureBT(insertBA, hlaMatrix, params):
"""Identify the bt kmers that aren't under pressure from potential vaccine epitopes (the person doesn't have any binding HLAs for the kmer location)
Returns a numpy array [N x 1 x nSites]
insertBA: [nHLAs x nSites] pd.DataFrame
hlaMatrix: boolean matrix of HLA expression [ptid x nHLAs]
params: binding"""
N = hlaMatrix.shape[0]
nSites = insertBA.shape[1]
"""Identify the binding HLAs at each site: [nHLAs x nSites] bool"""
bindingHLAs = insertBA < params['binding']
"""Identify the breakthrough kmers (BA) associated with people lacking a binding allele at each site"""
"""[ptid x hla x site]"""
tmpHLA = np.tile(hlaMatrix.values[:,:,None],(1,1,nSites))
tmpBinding = np.tile(bindingHLAs.values[None,:,:],(N,1,1))
"""Use this to index into btBA to pull out the BA for kmers that were not under pressure by the HLA (e.g. btBA[:,hlai,:][noPressureBT])"""
"""[ptid x 1 x sites]"""
"""For each bt kmer, for it not to be under pressure,
it must either not be an insert binder or not be an HLA allele that the person expresses, across all HLAs"""
noPressureBT = np.all(~tmpBinding | ~tmpHLA, axis=1)[:,None,:]
"""If any sites don't have any bt kmers that are not under pressure then raise an exception"""
if np.any(np.squeeze(noPressureBT.sum(axis=0)) == 0):
raise Exception("Can't compute escape threshold for all kmers!")
return noPressureBT
def _relative_binding_escape_distance(insertSeq,insertBA,seqDf,btBA,hlaMatrix,params,lookupDf):
"""Creates a distance matrix (DataFrame) [N x sites] with PTID rows and sites as columns
populated with the HLA binding escape count distance
This is "Allan's distance" and it differs from the binding_escape distance
only in how the escape threshold is determined. The escape threshold is the
median of the BA of the non-binding HLAs with a given BT peptide
insertSeq: AA str
insertBA: [nHLAs x nSites] pd.DataFrame
seqDf: DataFrame with ptid rows and column seq containing BT seqs
btBA: FULL btBA matrix [nSeqs x nHLAs x nSites] ndarray (neccessary for this method)
hlaMatrix: boolean matrix of HLA expression [ptid x nHLAs]
params: binding"""
N = btBA.shape[0]
nSites = insertBA.shape[1]
"""Don't count a binding escape if there's also an indel there (these distances should be mutually exclusive)
    It is important to make indelDist 0s and 1s for this purpose"""
#indelDist=(_indel_escape_distance(insertSeq,insertBA,seqDf,btBA,params).values > 0).astype(int64)
"""Identify the breakthrough kmers (BA) associated with people lacking a binding allele at each site"""
noPressureBT = _identifyNoPressureBT(insertBA, hlaMatrix, params)
"""with open(DATA_PATH + 'STEP/andrew_rel_binding_escape.csv','w') as fh:
print >> fh, 'position,seqid,ptid,hla,insert_peptide,bt_peptide,rbe,be,cutoff,ndiff,y,BET'"""
dist = np.nan * np.ones((N,nSites))
for ptidi,ptid in enumerate(seqDf.index):
"""Slice insertBA so that it only contains the valid HLAs of the current ptid"""
validHLAs = [h for h in hlaMatrix.columns[hlaMatrix.ix[ptid]] if isvalidHLA(h)]
validHLAInd = np.array([h in validHLAs for h in hlaMatrix.columns])
"""[4 x nSites]"""
tmpInsert = insertBA.ix[validHLAs].values
"""Do not double count escapes from homozygous alleles"""
uValidHLAs,uniqi = np.unique(validHLAs, return_index=True)
"""For each HLA (typically 4 per PTID), if it meets the criteria for this kmer then its an escape"""
"""[4 x nSites]"""
insertBinders = (tmpInsert < params['binding'])
"""This is a complicated step:
(1) Pull out the BA for the HLAs associated with this ptid [N x 4 x nSites]
(2) Tile the noPressureBT index matrix along the hla axis [N x 4 nSites]
(3) Index (1) using (2) to yield a matrix of the btBAs with the current HLAs (4) at the sequences of people that had no binding HLA
[nonBinders x 4 x nSites]
(4) Take the median across nonBinders
[4 x nSites]
"""
"""Set all btBA that are under pressure from an HLA to nan prior to taking the median across bt kmers"""
tmpNoPressureInd = np.tile(noPressureBT, (1,len(validHLAs),1))
tmpBtBA = btBA[:,validHLAInd,:]
tmpBtBA[~tmpNoPressureInd] = np.nan
"""tauThresholds [4 x nSites]"""
tauThresholds = np.nanmedian(tmpBtBA, axis=0)
"""nonBTBinders [4 x nSites]"""
nonBTBinders = (btBA[ptidi,validHLAInd,:] > tauThresholds) & (tmpInsert < btBA[ptidi,validHLAInd,:])
escapes = (insertBinders & nonBTBinders)[uniqi,:]
dist[ptidi,:] = np.squeeze(np.sum(escapes, axis=0))# * (1-indelDist[ptidi,:])
"""for hi in arange(escapes.shape[0]):
hla=uValidHLAs[hi]
hlai=insertBA.index==hla
seqid=lookupDf.index[lookupDf.ptid==ptid][0]
for kmeri in arange(escapes.shape[1]):
gapped,insertKmer=grabKmer(insertSeq,kmeri,k=params['nmer'])
gapped,btKmer=grabKmer(seqDf.seq[ptid],kmeri,k=params['nmer'])
try:
hdist=hamming_distance(insertKmer,btKmer)
except TypeError:
hdist=nan
if seqid=='5021709' and kmeri==10 and hla=='A_2301':
continue
raise Exception()
#print >> fh, 'position,seqid,ptid,hla,insert_peptide,bt_peptide,rbe,be,cutoff,ndiff,y,BET'
print >> fh, '%d,%s,%s,%s,%s,%s,%1.2f,%1.2f,%1.2f,%1.0f,%d,%1.1f' % (kmeri+1,
seqid,ptid,hla,insertKmer,btKmer,insertBA[kmeri][hlai],btBA[ptidi,hlai,kmeri],
tauThresholds[uniqi,:][hi,kmeri],hdist,escapes[hi,kmeri],params['binding'])"""
return pd.DataFrame(dist, index=seqDf.index, columns=insertBA.columns) | mit |
marqh/iris | lib/iris/tests/unit/plot/test_contour.py | 11 | 2995 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for the `iris.plot.contour` function."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
from iris.tests.stock import simple_2d
from iris.tests.unit.plot import TestGraphicStringCoord, MixinCoords
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@tests.skip_plot
class TestStringCoordPlot(TestGraphicStringCoord):
def test_yaxis_labels(self):
iplt.contour(self.cube, coords=('bar', 'str_coord'))
self.assertPointsTickLabels('yaxis')
def test_xaxis_labels(self):
iplt.contour(self.cube, coords=('str_coord', 'bar'))
self.assertPointsTickLabels('xaxis')
def test_yaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('bar', 'str_coord'))
plt.close(fig)
self.assertPointsTickLabels('yaxis', ax)
def test_xaxis_labels_with_axes(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
iplt.contour(self.cube, axes=ax, coords=('str_coord', 'bar'))
plt.close(fig)
self.assertPointsTickLabels('xaxis', ax)
def test_geoaxes_exception(self):
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
self.assertRaises(TypeError, iplt.contour, self.lat_lon_cube, axes=ax)
plt.close(fig)
@tests.skip_plot
class TestCoords(tests.IrisTest, MixinCoords):
def setUp(self):
# We have a 2d cube with dimensionality (bar: 3; foo: 4)
self.cube = simple_2d(with_bounds=False)
self.foo = self.cube.coord('foo').points
self.foo_index = np.arange(self.foo.size)
self.bar = self.cube.coord('bar').points
self.bar_index = np.arange(self.bar.size)
self.data = self.cube.data
self.dataT = self.data.T
self.mpl_patch = self.patch('matplotlib.pyplot.contour')
self.draw_func = iplt.contour
if __name__ == "__main__":
tests.main()
| lgpl-3.0 |
SU-ECE-17-7/ibeis | _scripts/win32bootstrap.py | 2 | 23140 | # -*- coding: utf-8 -*-
r"""
Hacky file to download win packages
Please only download files as needed.
Args:
--dl {pkgname:str} : package name to download
--run : if true runs installer on win32
CommandLine:
python _scripts\win32bootstrap.py --dl winapi --run
python _scripts\win32bootstrap.py --dl pyperclip --run
python _scripts\win32bootstrap.py --dl pydot --run
python _scripts\win32bootstrap.py --dl pydot --run --nocache
python _scripts\win32bootstrap.py --dl numpy --run --upgrade
python _scripts\win32bootstrap.py --dl h5py --run
References:
http://www.lfd.uci.edu/~gohlke/pythonlibs/
Notes:
'http://www.graphviz.org/pub/graphviz/stable/windows/graphviz-2.38.msi'
"""
from __future__ import division, print_function
import parse
import sys
#import os
import utool as ut
#from six.moves import filterfalse
import urllib2
# 'http://downloads.sourceforge.net/project/opencvlibrary/opencv-win/2.4.10/opencv-2.4.10.exe'
opencv_alt_ext_href = 'https://sourceforge.net/projects/opencvlibrary/files/opencv-win/3.0.0-beta/'
UNOFFICIAL_WEBURL = 'http://www.lfd.uci.edu/~gohlke/pythonlibs/'
OS_VERSION = 'win32'
# cpython 27
PY_VERSION = 'cp27'
#PY_VERSION = 'py2.7'
#PY_VERSION = 'py3.4'
# force redownload of hrefs
FORCE = ut.get_argflag(('--nocache', '--upgrade'))
AMD64 = False
os_to_pkgmanager = {
'win32': 'win32unoff',
'darwin': 'ports',
'debian_family': 'apt-get',
'fedora_family': 'yum',
}
default_pkgmanager = os_to_pkgmanager[sys.platform]
# TODO: implement this
class CPlatPkg(object):
def __init__(self,
default_name,
pkgmanager_map={},
alias_list=[],
platform_specific=False,
import_name=None,
alt_link=None,
):
self.default_name = default_name
self.pkgmanager_map = {
'default': None,
'yum': None,
'apt-get': None,
'ports': None,
'win32unoff': None,
'pip': None,
}
self.alias_list = alias_list
self.import_name = import_name
if import_name is not None:
self.alias_list.append(import_name)
self.pkgmanager_map.update(pkgmanager_map)
# True if only available on certain platforms
self.platform_specific = platform_specific
def is_alias(self, pkgname):
return pkgname in self.alias_list or pkgname == self.default_name
def get_platform_pkgmanager_name(self, pkgname, pkgmanager=default_pkgmanager):
""" returns name for current platforms package manager """
pkgmanager_name = self.pkgmanager_map[pkgmanager]
if pkgmanager_name is None:
pkgmanager_name = self.default_name
return pkgmanager_name
def get_import_name(self):
if self.import_name is not None:
return self.import_name
else:
return self.default_name
def is_installed(self):
import_name = self.get_import_name()
try:
globals_ = globals()
locals_ = locals()
exec('import ' + import_name, globals_, locals_)
except ImportError:
return False
return True
# Define alt names that map to hosted files
cplat_alias_pkglist = [
CPlatPkg(
'pywin32',
import_name='win32api',
platform_specific=True,
),
CPlatPkg(
'pyperclip',
pkgmanager_map={
'pip': 'pyperclip'
}
),
CPlatPkg(
'line-profiler',
{'win32': 'line_profiler'},
['kernprof']),
CPlatPkg(
'numpy',
#{'win32': 'numpy-MKL'}
{'win32': 'numpy-1.9.2rc1+mkl-cp27-none-win32.whl'}
#'numpy-1.9.2rc1+mkl-cp27-none-win32.whl'
),
# alias_tup = (std_dict, alias_list)
# std_dict = keys=packagemanager, vals=truename
# alias_list = list of names
#({'default': 'line_profiler', }, ['line-profiler'],),
]
def resolve_alias(pkgname):
for cplat_pkg in cplat_alias_pkglist:
if cplat_pkg.is_alias(pkgname):
return cplat_pkg.get_platform_pkgmanager_name(pkgname)
return pkgname
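# e.g. on win32, resolve_alias('kernprof') resolves to the canonical 'line-profiler'
# package name via its alias list, while names with no registered alias fall through unchanged.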
KNOWN_PKG_LIST = [
'pip',
'python-dateutil',
'pyzmq',
'setuptools',
'Pygments',
'Cython',
'requests',
#'colorama',
'psutil',
#'functools32',
#'six', # use pip for this
'dateutil',
'pyreadline',
'pyparsing',
#'sip',
'PyQt4',
'Pillow',
#'numpy-MKL-1.9', # 'numpy',
'scipy',
'ipython',
'tornado',
'matplotlib',
'scikit-learn',
'statsmodels',
'pandas', # statsmodel uses pandas :(
'patsy', # statsmodel uses patsy
'simplejson',
# 'flask', # cant do flask
]
def get_uninstalled_project_names():
try:
import pip
pkg_set = set(KNOWN_PKG_LIST)
pathmeta_list = pip.get_installed_distributions()
installed_set = set([meta.project_name for meta in pathmeta_list])
uninstalled_set = pkg_set.difference(installed_set)
uninstalled_list = list(uninstalled_set)
except Exception as ex:
print(ex)
uninstalled_list = KNOWN_PKG_LIST
return uninstalled_list
def build_uninstall_script():
#import utool as ut
from os.path import join
#import parse
pydir = 'C:/Python27'
uninstall_list = ut.glob(pydir, 'Remove*.exe')
cmd_list = []
for exefname in uninstall_list:
parse_result = parse.parse('{pypath}Remove{pkgname}.exe', exefname)
pkgname = parse_result['pkgname']
logfname = pkgname + '-wininst.log'
logfpath = join(pydir, logfname)
exefpath = join(pydir, exefname)
cmd = '"' + exefpath + '" -u "' + logfpath + '"'
cmd_list.append(cmd)
script_text = ('\n'.join(cmd_list))
print(script_text)
def main():
r"""
python win32bootstrap.py --dl numpy --nocache
python win32bootstrap.py --dl numpy-1.9.2rc1 --force
python win32bootstrap.py --dl numpy-1.9.2rc1 --run
python win32bootstrap.py --force
python win32bootstrap.py --dryrun
python win32bootstrap.py --dryrun --dl numpy scipy
python win32bootstrap.py --dl numpy
C:\Users\jon.crall\AppData\Roaming\utool\numpy-1.9.2rc1+mkl-cp27-none-win32.whl
pip install C:/Users/jon.crall/AppData/Roaming/utool/numpy-1.9.2rc1+mkl-cp27-none-win32.whl
"""
# Packages that you are requesting
pkg_list = []
if ut.get_argflag('--all'):
pkg_list = KNOWN_PKG_LIST
else:
print('specify --all to download all packages')
print('or specify --dl pkgname to download that package')
pkg_list.extend(ut.get_argval('--dl', list, []))
dryrun = ut.get_argflag('--dryrun')
pkg_exe_list = bootstrap_sysreq(pkg_list, dryrun=dryrun)
if ut.get_argflag('--run'):
for pkg_exe in pkg_exe_list:
if pkg_exe.endswith('.whl'):
ut.cmd('pip install ' + pkg_exe)
#ut.cmd(pkg_exe)
def bootstrap_sysreq(pkg_list='all', nocache=False, dryrun=False):
"""
pkg_list = ['line_profiler']
"""
# Still very hacky
if pkg_list == 'all':
pkg_list = get_uninstalled_project_names()
pkg_list_ = [resolve_alias(pkg) for pkg in pkg_list]
py_version = PY_VERSION
#python34_win32_x64_url = 'https://www.python.org/ftp/python/3.4.1/python-3.4.1.amd64.msi'
#python34_win32_x86_exe = ut.grab_file_url(python34_win32_x64_url)
all_href_list, page_str = get_unofficial_package_hrefs()
if len(all_href_list) > 0:
print('all_href_list[0] = ' + str(all_href_list[0]))
href_list = find_requested_hrefs(all_href_list, py_version, pkg_list_)
print('Available hrefs are:\n' + '\n'.join(href_list))
if not dryrun:
pkg_exe_list = download_win_packages(href_list)
text = '\n'.join(href_list) + '\n'
print('len(pkg_exe_list) = %r' % (len(pkg_exe_list),))
if len(pkg_exe_list) > 0:
text += ('Please Run:') + '\n'
text += ('\n'.join(['pip install ' + pkg for pkg in pkg_exe_list]))
print(text)
else:
print('No packages found!')
#print('TODO: Figure out how to run these installers without the GUI: and use the new wheels')
#print(text)
#print(pkg_list_)
else:
print('dryrun=True')
print('href_list = %r' % (href_list,))
pkg_exe_list = []
return pkg_exe_list
def download_win_packages(href_list):
pkg_exe_list = []
#href = href_list[0]
#pkg_exe = ut.util_grabdata.grab_file_url(href, delay=3, spoof=True)
#pkg_exe_list += [pkg_exe]
## Execute download
for href in href_list:
# Download the file if you haven't already done so
pkg_exe = ut.util_grabdata.grab_file_url(href, delay=3, spoof=True)
# Check to make sure it worked
nBytes = ut.get_file_nBytes(pkg_exe)
if nBytes < 1000:
print('There may be a problem with %r' % (pkg_exe,))
print('nBytes = %r' % (nBytes,))
RETRY_PROBLEMS = False
if RETRY_PROBLEMS:
# retry if file was probably corrupted
ut.delete(pkg_exe)
pkg_exe = ut.util_grabdata.grab_file_url(href, delay=3, spoof=True)
pkg_exe_list += [pkg_exe]
return pkg_exe_list
def find_requested_hrefs(all_href_list, py_version, pkg_list):
"""
Filters out everything but the requested urls
Returns the urls to download the requested installers
"""
print('Filtering to only requested HREFS')
href_list1, missing1 = filter_href_list(all_href_list, pkg_list, OS_VERSION, py_version)
#print('missing1 = %r' % (missing1,))
href_list2, missing2 = filter_href_list(all_href_list, missing1, OS_VERSION, py_version)
#print('missing2 = %r' % (missing2,))
#print(href_list2)
href_list3, missing3 = filter_href_list(all_href_list, missing2, 'x64', py_version.replace('p', 'P'))
#print('missing3 = %r' % (missing3,))
href_list4, missing4 = filter_href_list(all_href_list, missing3, 'any', py_version.replace('cp', 'py')[0:3])
if len(missing4) > 0:
print('Could not find a match for missing4=%r' % (missing4,))
#import Levenshtein
for pkg in missing4:
#dist_list = [Levenshtein.distance(href, pkg) for href in all_href_list]
dist_list = [0 if (href.find(pkg) > -1) else 100 for href in all_href_list]
closest_match_idxs = ut.list_argsort(dist_list)
print('Perhaps pkg=%r could match one of these?' % (pkg,))
closest_hrefs = ut.take(all_href_list, closest_match_idxs[0:3])
print(ut.indentjoin(closest_hrefs, '\n '))
href_list = href_list1 + href_list2 + href_list3 + href_list4
return href_list
def filter_href_list(all_href_list, win_pkg_list, os_version, py_version):
"""
Ignore:
win_pkg_list = ['pywin32']
OS_VERSION = 'win32'
PY_VERSION = 'py2.7'
os_version = OS_VERSION
py_version = PY_VERSION
"""
candidate_list = []
# hack
ignore_list = [
'vigranumpy',
]
for pkgname in win_pkg_list:
amdfunc = ((lambda x: x.find('amd64') > -1) if AMD64
           else (lambda x: x.find('amd64') == -1))
from os.path import basename
filter_funcs = [
lambda x: x.find(pkgname) > -1,
amdfunc,
lambda x: py_version in x,
lambda x: os_version in x,
lambda x: not any([bad in x for bad in ignore_list]),
lambda x: basename(x).lower().startswith(pkgname.lower()),
]
_candidates = all_href_list
for func_ in filter_funcs:
_candidates = list(filter(func_, _candidates))
candidates = list(_candidates)
if len(candidates) > 1:
if ut.VERBOSE:
print('\n\n\n Found %d candidates for %r' % (len(candidates), pkgname,))
print(pkgname)
# parse out version
def get_href_version(href):
y = basename(href).split('-' + py_version)[0]
version = y.lower().split(pkgname.lower() + '-')[1]
return version
version_strs = list(map(get_href_version, candidates))
# Argsort the versions
from distutils.version import LooseVersion
from operator import itemgetter
versions = list(map(LooseVersion, version_strs))
sorted_tups = sorted(list(enumerate(versions)), key=itemgetter(1), reverse=True)
# Choose highest version
index = sorted_tups[0][0]
candidates = candidates[index:index + 1]
#print(candidates)
#print('Conflicting candidates: %r' % (candidates))
#print('\n\n\n')
candidate_list.extend(candidates)
missing = []
for pkgname in win_pkg_list:
if not any([pkgname in href for href in candidate_list]):
print('missing: %r' % pkgname)
missing += [pkgname]
return candidate_list, missing
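# Illustrative sketch: the candidate disambiguation above relies on
# distutils' LooseVersion, which orders version strings numerically rather
# than lexically. Example version strings only.
def _looseversion_order_sketch():
    from distutils.version import LooseVersion
    versions = ['1.9.1', '1.10.0', '1.2']
    # Numeric ordering: ['1.2', '1.9.1', '1.10.0'] (a lexical sort would put
    # '1.10.0' before '1.9.1')
    return sorted(versions, key=LooseVersion)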
def get_unofficial_package_hrefs(nocache=None):
"""
Downloads the entire webpage of available hrefs, or returns a cached copy
"""
if nocache is None:
nocache = FORCE
cachedir = ut.get_app_resource_dir('utool')
try:
if nocache:
raise Exception('cachemiss')
all_href_list = ut.load_cache(cachedir, 'win32_hrefs', 'all_href_list')
page_str = ut.load_cache(cachedir, 'win32_hrefs', 'page_str')
print('all_href_list cache hit')
return all_href_list, page_str
except Exception:
print('all_href_list cache miss')
pass
# Read page html
headers = { 'User-Agent' : 'Mozilla/5.0' }
print('Sending request to %r' % (UNOFFICIAL_WEBURL,))
req = urllib2.Request(UNOFFICIAL_WEBURL, None, headers)
page = urllib2.urlopen(req)
page_str = page.read()
encrypted_lines = list(filter(lambda x: x.find('onclick') > -1, page_str.split('\n')))
print('Read %d encrypted lines' % (len(encrypted_lines),))
# List of all download links, now choose wisely, because we don't want
# to hack for evil
#line = encrypted_lines[0]
def parse_encrypted(line):
"""
<script type="text/javascript">
// <![CDATA[
if (top.location!=location) top.location.href=location.href;
function dc(ml,mi){
var ot="";
for(var j=0;j<mi.length;j++)
ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
document.write(ot);
}
function dl1(ml,mi){
var ot="";
for(var j=0;j<mi.length;j++)
ot+=String.fromCharCode(ml[mi.charCodeAt(j)-48]);
location.href=ot;
}
function dl(ml,mi){
mi=mi.replace('&lt;','<');
mi=mi.replace('&gt;','>');
mi=mi.replace('&amp;','&');
setTimeout(function(){ dl1(ml,mi) }, 1500);}
// ]]>
</script>
#start = line.find('javascript:dl') + len('javascript:dl') + 2
#end = line.find('title') - 4
#code = line[start: end]
#mid = code.find(']')
#left = code[0:mid]
#right = code[mid + 4:]
#ml = left
#mi = right
"""
_, ml, mi, _ = parse.parse('{}javascript:dl([{}], "{}"){}', line)
mi_ = mi.replace('&lt;', '<').replace('&gt;', '>').replace('&amp;', '&')
#ml_ = eval('[' + ml + ']')
ml_ = eval(ml)
href_ = ''.join([chr(ml_[ord(michar) - 48]) for michar in mi_])
href = ''.join([UNOFFICIAL_WEBURL, href_])
return href
all_href_list = list(map(parse_encrypted, encrypted_lines))
print('decrypted %d lines' % (len(all_href_list)))
ut.save_cache(cachedir, 'win32_hrefs', 'all_href_list', all_href_list)
ut.save_cache(cachedir, 'win32_hrefs', 'page_str', page_str)
return all_href_list, page_str
def uninstall_everything_win32():
r"""
try to figure out way to uninstall things easy
References:
http://msdn.microsoft.com/en-us/library/aa372024(VS.85).aspx
http://www.sevenforums.com/tutorials/272460-programs-uninstall-using-command-prompt-windows.html
"""
uninstall_script = ut.codeblock( # NOQA
"""
Msiexec /uninstall "Python 2.7 ipython-2.1.0"
Msiexec /uninstall "Python 2.7 ipython-2.1.0"
Msiexec /x opencv-python-py2.7.msi
Msiexec /x "Python 2.7 opencv-python-2.4.10" /passive
C:/Python27/Removenumpy.exe
C:\Python27\Removeopencv-python.exe
Msiexec /uninstall "C:\Python27\Removeopencv-python.exe"
Unofficial package uninstall commands were found in the registry here:
HKEY_LOCAL_MACHINE\SOFTWARE\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall
python -c "import utool"
# THESE COMMANDS WORK
"C:/Python27/Removeopencv-python.exe" -u "C:/Python27/opencv-python-wininst.log"
"C:/Python27/Removenumpy.exe" -u "C:/Python27/numpy-wininst.log"
"C:/Python27/Removescipy.exe" -u "C:/Python27/scipy-wininst.log"
"C:/Python27/RemoveCython.exe" -u "C:/Python27/Cython-wininst.log"
"C:/Python27/Removeh5py.exe" -u "C:/Python27/h5py-wininst.log"
"C:/Python27/Removeipython.exe" -u "C:/Python27/ipython-wininst.log"
"C:/Python27/RemovePillow.exe" -u "C:/Python27/Pillow-wininst.log"
"C:/Python27/Removematplotlib.exe" -u "C:/Python27/matplotlib-wininst.log"
"C:/Python27/Removepsutil.exe" -u "C:/Python27/psutil-wininst.log"
"C:/Python27/Removeline_profiler.exe" -u "C:/Python27/line_profiler-wininst.log"
"C:/Python27/RemovePygments.exe" -u "C:/Python27/Pygments-wininst.log"
"C:/Python27/Removepyparsing.exe" -u "C:/Python27/pyparsing-wininst.log"
"C:/Python27/Removepyreadline.exe" -u "C:/Python27/pyreadline-wininst.log"
"C:/Python27/Removepywin32.exe" -u "C:/Python27/pywin32-wininst.log"
"C:/Python27/Removepyzmq.exe" -u "C:/Python27/pyzmq-wininst.log"
"C:/Python27/Removesix.exe" -u "C:/Python27/six-wininst.log"
"C:/Python27/RemoveSphinx.exe" -u "C:/Python27/Sphinx-wininst.log"
"C:/Python27/Removetornado.exe" -u "C:/Python27/tornado-wininst.log"
"C:/Python27/Removetables.exe" -u "C:/Python27/tables-wininst.log"
"C:/Python27/Removenumexpr.exe" -u "C:/Python27/numexpr-wininst.log"
"C:/Python27/Removepandas.exe" -u "C:/Python27/pandas-wininst.log"
python -c "import utool as ut; print('\n'.join(ut.glob('C:/Python27', 'Remove*.exe')))"
pip list
pip uninstall Sphinx -y
pip uninstall UNKNOWN -y
pip uninstall Theano -y
pip uninstall Pillow -y
pip uninstall python-qt -y
pip uninstall pyrf -y
pip uninstall pyfiglet -y
pip uninstall pyhesaff -y
pip uninstall networkx -y
pip uninstall detecttools -y
pip uninstall Flask -y
pip uninstall flann -y
pip uninstall psutil -y
pip uninstall simplejson -y
pip uninstall objgraph -y
pip uninstall selenium -y
pip uninstall scikit-image -y
pip uninstall scikit-learn -y
pip uninstall statsmodels -y
pip uninstall parse -y
pip uninstall astroid -y
pip uninstall colorama -y
pip uninstall coverage -y
pip uninstall decorator -y
pip uninstall greenlet -y
pip uninstall gevent -y
pip uninstall guppy -y
pip uninstall memory-profiler -y
pip uninstall nose -y
pip uninstall utool -y
pip uninstall rope -y
pip uninstall requests -y
pip uninstall sphinxcontrib-napoleon -y
pip uninstall RunSnakeRun -y
pip uninstall SquareMap -y
pip uninstall PyInstaller -y
pip uninstall pytest -y
pip uninstall pylru -y
pip uninstall setproctitle -y
pip uninstall functools32 -y
pip install argparse --upgrade
pip install virtualenv --upgrade
pip install astor --upgrade
pip install autopep8 --upgrade
pip install docutils --upgrade
pip install editdistance --upgrade
pip install flake8 --upgrade
pip install importlib --upgrade
pip install openpyxl --upgrade
pip install pep8 --upgrade
pip install pip --upgrade
pip install pyfiglet --upgrade
pip install pyflakes --upgrade
pip install pylint --upgrade
pip install python-dateutil --upgrade
pip install python-Levenshtein --upgrade
pip install pytz --upgrade
pip install rope --upgrade
pip install setuptools --upgrade
pip install six --upgrade
pip install tox --upgrade
pip install Werkzeug --upgrade
pip install WinSys-3.x --upgrade
pip install backports.ssl-match-hostname --upgrade
pip install certifi --upgrade
pip install distribute --upgrade
pip install dragonfly --upgrade
pip install itsdangerous --upgrade
pip install jedi --upgrade
pip install Jinja2 --upgrade
pip install logilab-common --upgrade
pip install MarkupSafe --upgrade
pip install mccabe --upgrade
pip install patsy --upgrade
pip install py --upgrade
pip install pycom --upgrade
C:\Users\joncrall\AppData\Roaming\utool\pip-1.5.6.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\python-dateutil-2.2.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\setuptools-5.8.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\scipy-0.14.0.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\PyQt4-4.11.3-gpl-Py2.7-Qt4.8.6-x64.exe
C:\Users\joncrall\AppData\Roaming\utool\requests-2.4.3.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\psutil-2.1.3.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\python-dateutil-2.2.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\pyreadline-2.0.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\pyparsing-2.0.3.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\Pygments-2.0.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\pyzmq-14.4.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\tornado-4.0.2.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\Pillow-2.6.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\Cython-0.21.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\ipython-2.3.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\numpy-MKL-1.9.1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\scipy-0.15.0b1.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\scikit-learn-0.15.2.win32-py2.7.exe
C:\Users\joncrall\AppData\Roaming\utool\matplotlib-1.4.2.win32-py2.7.exe
""")
if __name__ == '__main__':
main()
| apache-2.0 |
istellartech/OpenGoddard | examples/04_Goddard_0knot.py | 1 | 6069 | # -*- coding: utf-8 -*-
# Copyright 2017 Interstellar Technologies Inc. All Rights Reserved.
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from OpenGoddard.optimize import Problem, Guess, Condition, Dynamics
class Rocket:
g0 = 1.0 # Gravity at surface [-]
def __init__(self):
self.H0 = 1.0 # Initial height
self.V0 = 0.0 # Initial velocity
self.M0 = 1.0 # Initial mass
self.Tc = 3.5 # Use for thrust
self.Hc = 500 # Use for drag
self.Vc = 620 # Use for drag
self.Mc = 0.6 # Fraction of initial mass left at end
self.c = 0.5 * np.sqrt(self.g0*self.H0) # Thrust-to-fuel mass
self.Mf = self.Mc * self.M0 # Final mass
self.Dc = 0.5 * self.Vc * self.M0 / self.g0 # Drag scaling
self.T_max = self.Tc * self.g0 * self.M0 # Maximum thrust
def dynamics(prob, obj, section):
h = prob.states(0, section)
v = prob.states(1, section)
m = prob.states(2, section)
T = prob.controls(0, section)
Dc = obj.Dc
c = obj.c
drag = 1 * Dc * v ** 2 * np.exp(-obj.Hc * (h - obj.H0) / obj.H0)
g = obj.g0 * (obj.H0 / h)**2
dx = Dynamics(prob, section)
dx[0] = v
dx[1] = (T - drag) / m - g
dx[2] = - T / c
return dx()
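# Illustrative sketch (not used by the optimizer): the dynamics above encode
# the nondimensional Goddard ODEs
#     dh/dt = v,    dv/dt = (T - drag)/m - g,    dm/dt = -T/c
# A crude forward-Euler integration at constant full thrust shows what they
# describe; the step size, horizon and thrust level are arbitrary example
# values, and the actual trajectory is computed by OpenGoddard below.
def _euler_burn_sketch(steps=50, dt=0.001):
    obj_ = Rocket()
    h, v, m = obj_.H0, obj_.V0, obj_.M0
    T = obj_.T_max
    for _ in range(steps):
        drag = obj_.Dc * v ** 2 * np.exp(-obj_.Hc * (h - obj_.H0) / obj_.H0)
        g = obj_.g0 * (obj_.H0 / h) ** 2
        h += dt * v
        v += dt * ((T - drag) / m - g)
        m += dt * (-T / obj_.c)
    return h, v, m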
def equality(prob, obj):
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
tf = prob.time_final(-1)
result = Condition()
# event condition
result.equal(h[0], obj.H0)
result.equal(v[0], obj.V0)
result.equal(m[0], obj.M0)
result.equal(v[-1], 0.0)
result.equal(m[-1], obj.Mf)
return result()
def inequality(prob, obj):
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
tf = prob.time_final(-1)
result = Condition()
# lower bounds
result.lower_bound(h, obj.H0)
result.lower_bound(v, 0.0)
result.lower_bound(m, obj.Mf)
result.lower_bound(T, 0.0)
result.lower_bound(tf, 0.1)
# upper bounds
result.upper_bound(m, obj.M0)
result.upper_bound(T, obj.T_max)
return result()
def cost(prob, obj):
h = prob.states_all_section(0)
return -h[-1]
# ========================
plt.close("all")
# Program Starting Point
time_init = [0.0, 0.3]
n = [50]
num_states = [3]
num_controls = [1]
max_iteration = 30
flag_savefig = True
savefig_file = "04_Goddard/04_0knot_"
# ------------------------
# set OpenGoddard class for algorithm determination
prob = Problem(time_init, n, num_states, num_controls, max_iteration)
# ------------------------
# create instance of operating object
# Nondimensionalization of parameters
obj = Rocket()
# ========================
# Initial parameter guess
# altitude profile
H_init = Guess.cubic(prob.time_all_section, 1.0, 0.0, 1.010, 0.0)
# Guess.plot(prob.time_all_section, H_init, "Altitude", "time", "Altitude")
# if(flag_savefig):plt.savefig(savefig_file + "guess_alt" + ".png")
# velocity
V_init = Guess.linear(prob.time_all_section, 0.0, 0.0)
# Guess.plot(prob.time_all_section, V_init, "Velocity", "time", "Velocity")
# mass profile
M_init = Guess.cubic(prob.time_all_section, 1.0, -0.6, 0.6, 0.0)
# Guess.plot(prob.time_all_section, M_init, "Mass", "time", "Mass")
# if(flag_savefig):plt.savefig(savefig_file + "guess_mass" + ".png")
# thrust profile
T_init = Guess.cubic(prob.time_all_section, 3.5, 0.0, 0.0, 0.0)
# Guess.plot(prob.time_all_section, T_init, "Thrust Guess", "time", "Thrust")
# if(flag_savefig):plt.savefig(savefig_file + "guess_thrust" + ".png")
plt.show()
# ========================
# Substitution initial value to parameter vector to be optimized
prob.set_states_all_section(0, H_init)
prob.set_states_all_section(1, V_init)
prob.set_states_all_section(2, M_init)
prob.set_controls_all_section(0, T_init)
# ========================
# Main Process
# Assign problem to SQP solver
prob.dynamics = [dynamics]
prob.knot_states_smooth = []
prob.cost = cost
prob.cost_derivative = None
prob.equality = equality
prob.inequality = inequality
def display_func():
h = prob.states_all_section(0)
print("max altitude: {0:.5f}".format(h[-1]))
prob.solve(obj, display_func, ftol=1e-10)
# ========================
# Post Process
# ------------------------
# Convert parameter vector to variable
h = prob.states_all_section(0)
v = prob.states_all_section(1)
m = prob.states_all_section(2)
T = prob.controls_all_section(0)
time = prob.time_update()
# ------------------------
# Calculate necessary variables
Dc = 0.5 * 620 * 1.0 / 1.0
drag = 1 * Dc * v ** 2 * np.exp(-500 * (h - 1.0) / 1.0)
g = 1.0 * (1.0 / h)**2
# ------------------------
# Visualization
plt.figure()
plt.title("Altitude profile")
plt.plot(time, h, marker="o", label="Altitude")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Altitude [-]")
if(flag_savefig): plt.savefig(savefig_file + "altitude" + ".png")
plt.figure()
plt.title("Velocity")
plt.plot(time, v, marker="o", label="Velocity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Velocity [-]")
if(flag_savefig): plt.savefig(savefig_file + "velocity" + ".png")
plt.figure()
plt.title("Mass")
plt.plot(time, m, marker="o", label="Mass")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Mass [-]")
if(flag_savefig): plt.savefig(savefig_file + "mass" + ".png")
plt.figure()
plt.title("Thrust profile")
plt.plot(time, T, marker="o", label="Thrust")
plt.plot(time, drag, marker="o", label="Drag")
plt.plot(time, g, marker="o", label="Gravity")
for line in prob.time_knots():
plt.axvline(line, color="k", alpha=0.5)
plt.grid()
plt.xlabel("time [s]")
plt.ylabel("Thrust [-]")
plt.legend(loc="best")
if(flag_savefig): plt.savefig(savefig_file + "force" + ".png")
plt.show()
| mit |
abhishekkrthakur/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
m3wolf/scimap | scimap/peakfitting.py | 1 | 16766 | # -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of scimap.
#
# Scimap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scimap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scimap. If not, see <http://www.gnu.org/licenses/>.
"""Routines related to fitting curves to data (eg fitting XANES data
with a Gaussian curve."""
from collections import namedtuple
import math
import warnings
import numpy as np
from scipy import optimize
# import pandas as pd
from pandas import Series
from . import exceptions
from . import plots
# How strongly to penalize negative peak heights, etc
BASE_PENALTY = 300
def remove_peak_from_df(x, y, xrange):
"""Remove data with the given xrange from the x and y data."""
is_peak = np.logical_and(x>xrange[0], x<xrange[1])
newx = x[~is_peak]
newy = y[~is_peak]
return newx, newy
def gaussian_fwhm(width_parameter):
"""Calculates full-width half max based on width parameter."""
# Taken from wikipedia page for "Gaussian Function".
return 2.35482 * width_parameter
def cauchy_fwhm(width_parameter):
"""Calculates full-width half max based on width parameter."""
return 2 * width_parameter
def discrete_fwhm(x, y):
"""Compute numerically the full-width half-max of peak described by x
and y data.
"""
maxheight = y.max()
# Split the dataset into an upper half and a lower half
maxidx = y.argmax()
leftx = x[:maxidx+1]
rightx = x[maxidx:]
lefty = y[:maxidx+1]
righty = y[maxidx:]
# Only interested in data that are less than half-max
rightx = rightx[righty < maxheight / 2]
leftx = leftx[lefty < maxheight / 2]
# Find the nearest datum to halfmax in each half
if len(rightx) > 0:
rightx = rightx.min()
else:
rightx = float("nan")
if len(leftx) > 0:
leftx = leftx.max()
else:
leftx = float("nan")
# Check for one-sided peaks (such as XAS edge)
if math.isnan(rightx):
fwhm = 2 * abs(leftx)
elif math.isnan(leftx):
fwhm = 2 * abs(rightx)
else:
fwhm = abs(rightx - leftx)
return fwhm
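# Illustrative sketch: sanity check of discrete_fwhm() on a synthetic
# Gaussian peak; the numeric width should be close to the analytic value
# gaussian_fwhm(sigma). The sigma and center values are arbitrary examples.
def _discrete_fwhm_sketch(sigma=0.05, center=35.15):
    x = np.linspace(center - 1, center + 1, 2001)
    y = np.exp(-np.square(x - center) / (2 * sigma ** 2))
    numeric = discrete_fwhm(x, y)
    analytic = gaussian_fwhm(sigma)  # 2.35482 * sigma
    return numeric, analytic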
class PeakFit():
Parameters = namedtuple('Parameters', ('height', 'center', 'width'))
height = 450
center = 35.15
width = 0.02
def __repr__(self):
return "<{cls}: {center}>".format(cls=self.__class__.__name__,
center=round(self.center, 3))
@property
def parameters(self):
return self.Parameters(self.height, self.center, self.width)
@parameters.setter
def parameters(self, value):
params = self.Parameters(*value)
self.height = params.height
self.center = params.center
self.width = params.width
def evaluate(self, x):
"""Evaluate this fitted subpeak at given x values."""
return self.kernel(x, **self.parameters._asdict())
def penalty(self, params):
"""Rules for contraining the fitting algorithm. 0 means no penalty."""
penalty = 0
# Penalize negative peak heights
if params.height < 0:
penalty += BASE_PENALTY
if params.width < 0:
penalty += BASE_PENALTY
return penalty
def initial_parameters(self, x, y, center=0, height=None):
"""Estimate intial parameters from data. Calling function is
responsible for determining peak center since multiple peaks
may be involved. If `height` is None (default) it is estimated
from the maximum value in the data.
"""
# Convert FWHM to stdDev (taken from wolfram alpha page for Gaussian)
stdDev = discrete_fwhm(x, y) / 2.3548
# Decide which value to use for the peak height
if height is None:
new_height = y.max()
else:
new_height = height
# Prepare tuples of parameters
p1 = self.Parameters(height=new_height,
center=center,
width=stdDev)
return p1
class EstimatedFit(PeakFit):
"""Fallback fit using just estimated intial parameters."""
pass
class GaussianFit(PeakFit):
@staticmethod
def kernel(x, height, center, width):
"""
Compute a Gaussian distribution of peak height and width around center.
x is an array of points for which to return y values.
"""
y = height * np.exp(-np.square(x - center) / 2 / np.square(width))
return y
class CauchyFit(PeakFit):
@staticmethod
def kernel(x, height, center, width):
"""
Compute a Cauchy (Lorentz) distribution of peak height and width
around center. x is an array of points for which to return y
values.
"""
y = (
height * np.square(width) /
(np.square(width) + np.square(x - center))
)
return y
class PearsonVIIFit(PeakFit):
@staticmethod
def kernel(x, height, center, width, exponent):
raise NotImplementedError
class PseudoVoigtFit(PeakFit):
height_g = 450
height_c = 450
center = 35.15
width_g = 0.05
width_c = 0.05
eta = 0.5
Parameters = namedtuple('PseudoVoigtParameters', ('height_g', 'height_c',
'center',
'width_g', 'width_c',
'eta'))
@property
def height(self):
return self.height_g + self.height_c
@property
def width(self):
return self.width_g + self.width_c
def fwhm(self):
# Gaussian component
fwhm = self.eta * gaussian_fwhm(self.width_g)
# Cauchy component
fwhm += (1 - self.eta) * cauchy_fwhm(self.width_c)
return fwhm
@property
def parameters(self):
return self.Parameters(height_g=self.height_g,
height_c=self.height_c,
center=self.center,
width_g=self.width_g,
width_c=self.width_c,
eta=self.eta)
@parameters.setter
def parameters(self, value):
params = self.Parameters(*value)
self.height_g = params.height_g
self.height_c = params.height_c
self.center = params.center
self.width_g = params.width_g
self.width_c = params.width_c
self.eta = params.eta
def penalty(self, params):
penalty = 0
# Prepare parameters for penalty from parent class
parent = super(PseudoVoigtFit, self)
gParams = parent.Parameters(height=params.height_g,
center=params.center,
width=params.width_g)
penalty += parent.penalty(gParams)
cParams = parent.Parameters(height=params.height_c,
center=params.center,
width=params.width_c)
penalty += parent.penalty(cParams)
# Check for eta between zero and 1
if not(0 < params.eta < 1):
penalty += BASE_PENALTY
return penalty
def initial_parameters(self, *args, **kwargs):
"""Estimate intial parameters for this peak. Arguments are passed to a
`PeakFit` object then modified for psuedo-voigt compatibility."""
# Use a vanilla peak fit to estimate the initial parameters
vanilla_params = PeakFit().initial_parameters(*args, **kwargs)
# Prepare tuples of parameters
params = self.Parameters(height_g=vanilla_params.height,
height_c=vanilla_params.height,
center=vanilla_params.center,
width_g=vanilla_params.width,
width_c=vanilla_params.width,
eta=0.5)
return params
@staticmethod
def kernel(x, height_g, height_c, center, width_g, width_c, eta):
"""
Compute a linear combination of (G)aussian and (C)auchy functions:
y = eta*G + (1-eta)*C
params are tuples of (height, center, width) to pass to the respective
functions. x is an array of points for which to return y
values.
"""
g = GaussianFit.kernel(x, height_g, center, width_g)
c = CauchyFit.kernel(x, height_c, center, width_c)
y = eta * g + (1 - eta) * c
return y
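# Illustrative sketch: the pseudo-Voigt kernel above reduces to the pure
# Gaussian profile when eta == 1 and to the pure Cauchy profile when
# eta == 0. Parameter values here are arbitrary examples.
def _pseudo_voigt_mixing_sketch():
    x = np.linspace(34.5, 35.8, 200)
    g = GaussianFit.kernel(x, height=450, center=35.15, width=0.05)
    c = CauchyFit.kernel(x, height=450, center=35.15, width=0.05)
    pv_gauss = PseudoVoigtFit.kernel(x, 450, 450, 35.15, 0.05, 0.05, eta=1.0)
    pv_cauchy = PseudoVoigtFit.kernel(x, 450, 450, 35.15, 0.05, 0.05, eta=0.0)
    return np.allclose(pv_gauss, g), np.allclose(pv_cauchy, c)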
class Peak():
"""
A single peak in data. The actual peak types (Gaussian, Cauchy,
etc.) are described in PeakFit objects.
"""
vertical_offset = 0
fit_list = []
fit_classes = {
'gaussian': GaussianFit,
'cauchy': CauchyFit,
'pearson vii': PearsonVIIFit,
'pseudo-voigt': PseudoVoigtFit,
'estimated': EstimatedFit,
}
def __init__(self, num_peaks=1, method='gaussian'):
"""Arguments
---------
num_peaks : How many subpeaks should be used for fitting.
"""
self.num_peaks = num_peaks
self.FitClass = self.fit_classes[method.lower()]
def __repr__(self):
name = "<{cls}: {center}>".format(
cls=self.__class__.__name__,
center=self.center()
)
return name
def split_parameters(self, params):
"""
Take a full list of parameters and divide it into groups for each
subpeak.
"""
chunkSize = int(len(params) / self.num_peaks)
groups = []
for i in range(0, len(params), chunkSize):
end = i + chunkSize
groups.append(params[i:end])
return groups
def guess_parameters(self, x, y):
"""Use the data to guess appropriate starting parameters before
fitting can take place. Returns a list the same length as the
number of sub-peaks. Each entry is a tuple of sub-peak
parameters.
Arguments
---------
- xs (array-like) : Independent data to use for guessing
peak properties.
- ys (array-like) : Dependent data to use for guess peak
properties.
"""
guess = []
# Guess peak position based on maximum value
max_idx = y.argmax()
peak_center = x[max_idx]
# Guess values for width (based on fitting method)
for i in range(0, self.num_peaks):
    sub_params = self.FitClass().initial_parameters(x=x, y=y,
                                                    center=peak_center)
guess.append(sub_params)
return guess
def fit(self, x, y):
"""Least squares refinement of a function to the data.
Arguments
---------
- x : Iterable of x values to fit against
- y : Iterable of y values to fit against
- num_peaks (int) : How many overlapping peaks should be
used. Eg. X-ray data often has kα1 and kα2 peaks (default 1)
- method (str) : Selects which peak shape to use. Valid choices are:
- 'Gaussian'
- 'Cauchy'
- 'Pearson VII'
- 'Pseudo-Voigt'
- 'estimated'
"""
# Check that actual data was passed
if len(x) == 0 or len(y) == 0:
raise exceptions.RefinementError("No data to fit.")
self.x_range = (x[0], x[-1])
# Create fit object(s)
self.fit_list = []
FitClass = self.FitClass
for i in range(0, self.num_peaks):
self.fit_list.append(FitClass())
# Define objective function
def objective(x, *params):
# Unpack the parameters
paramGroups = self.split_parameters(params)
result = np.zeros_like(x)
for idx, fit in enumerate(self.fit_list):
y = fit.kernel(x, *paramGroups[idx])
result += y
return result
# Error function, penalizes values out of range
def residual_error(obj_params):
penalty = 0
# Calculate dual peak penalties
paramlist = self.split_parameters(obj_params)
paramlist = [FitClass.Parameters(*p) for p in paramlist]
for fit, paramTuple in zip(self.fit_list, paramlist):
# Calculate single peak penalties
penalty += fit.penalty(paramTuple)
result = objective(x, *obj_params)
return (y - result)**2 + penalty
initialParameters = self.guess_parameters(x=x, y=y)
# Minimize the residual least squares
try:
result = optimize.leastsq(residual_error,
x0=initialParameters,
full_output=True)
except RuntimeError as e:
# Could not find optimum fit
angle = (self.x_range[0] + self.x_range[1]) / 2
msg = "Peak ~{angle:.1f}°: {error}".format(angle=angle, error=e)
raise exceptions.PeakFitError(msg)
except TypeError:
raise exceptions.RefinementError
else:
popt = result[0]
# Split optimized parameters by number of fits
paramsList = self.split_parameters(popt)
# Save optimized parameters for each fit
for idx, fit in enumerate(self.fit_list):
fit.parameters = paramsList[idx]
# Save goodness-of-fit
# self._goodness = self.goodness(data)
def predict(self, xdata=None):
"""Get a dataframe of the predicted peak fits.
Arguments
---------
xdata (numpy array) : An array of x values to use as the index
for predicting y values. If None (default), a numpy linspace
will be generated within the range intially used for the
fitting.
"""
if xdata is None:
# Create a default linspace to use as xdata
x = np.linspace(self.x_range[0],
self.x_range[1],
num=1000)
else:
x = xdata
y = np.zeros_like(x)
for fit in self.fit_list:
y += fit.evaluate(x)
return y
def residuals(self, observations):
"""Calculate the differences at each point between the fit and the
provided data.
Arguments
---------
observations (pandas series): The original data against which
to compare the fit.
"""
predicted = self.predict(xdata=observations.index)
residuals = observations - predicted
return residuals
def goodness(self, observations=None):
"""Calculate the goodness of fit. Returns the sum of squared residuals
divided by degrees of freedom. Lower numbers describe better
fit.
Arguments
---------
observations (pandas series): The original data against which
to compare the fit. If None (default), use the data from
the most recent call to `fit()`.
"""
if observations is None:
goodness = self._goodness
else:
# Determine total residual
sum_of_squares = (self.residuals(observations)**2).sum()
# Divide by degrees of freedom
goodness = math.sqrt(sum_of_squares) / (len(observations) - 1)
return goodness
def center(self):
centers = [f.center for f in self.fit_list]
mean_center = sum(centers) / self.num_peaks
return mean_center
def fwhm(self):
"""Full width at half-maximum. A discrete curve is predicted from the
fit and the width is found numerically.
"""
# Create a default linspace to use as xdata
x = np.linspace(self.x_range[0],
self.x_range[1],
num=2000)
predicted = self.predict(xdata=x)
width = discrete_fwhm(x, predicted)
return width
def plot_fit(self, ax=None, background=None):
# predict() returns a plain ndarray, so build the matching x values here
x = np.linspace(self.x_range[0], self.x_range[1], num=1000)
predicted = self.predict(xdata=x)
if ax is None:
    ax = plots.new_axes()
ax.plot(x, predicted, label="overall fit")
return ax
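# Illustrative usage sketch: fitting a single synthetic Gaussian peak with
# the Peak class defined above. The recovered center and FWHM should be
# close to the values used to build the data; all values here are arbitrary
# examples, not taken from any real refinement.
def _peak_usage_sketch():
    x = np.linspace(34.5, 35.8, 300)
    y = GaussianFit.kernel(x, height=450, center=35.15, width=0.05)
    peak = Peak(num_peaks=1, method='gaussian')
    peak.fit(x, y)
    return peak.center(), peak.fwhm()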
| gpl-3.0 |
Obus/scikit-learn | sklearn/metrics/pairwise.py | 104 | 42995 | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import itertools
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.fixes import partial
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float
return X, Y, dtype
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype = _return_float_dtype(X, Y)
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr', dtype=dtype)
else:
X = check_array(X, accept_sparse='csr', dtype=dtype)
Y = check_array(Y, accept_sparse='csr', dtype=dtype)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
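# Illustrative sketch: the expansion dist(x, y)^2 = <x, x> - 2 <x, y> + <y, y>
# used above can be checked against a direct computation; tiny differences
# are expected from floating point rounding. Random data, example shapes only.
def _euclidean_expansion_sketch():
    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    Y = rng.rand(4, 3)
    fast = euclidean_distances(X, Y)
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    return np.abs(fast - direct).max()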
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x][flags] = min_indices[flags] + chunk_y.start
values[chunk_x][flags] = min_values[flags]
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
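# Illustrative sketch: typical use of pairwise_distances_argmin_min -- for
# every row of X, find the closest row of Y and the corresponding distance
# without materializing the full distance matrix. Example data only.
def _argmin_min_sketch():
    X = np.array([[0.0, 0.0], [3.0, 3.0]])
    Y = np.array([[0.0, 1.0], [3.0, 2.0], [10.0, 10.0]])
    indices, distances = pairwise_distances_argmin_min(X, Y)
    # Expected here: indices == [0, 1] and distances == [1.0, 1.0]
    return indices, distances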
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
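# Illustrative sketch: numerical check of the note above -- on unit-normalized
# rows, the cosine distance equals half the squared euclidean distance.
# Random data, example shapes only.
def _paired_cosine_identity_sketch():
    rng = np.random.RandomState(42)
    X = normalize(rng.rand(6, 4))
    Y = normalize(rng.rand(6, 4))
    lhs = paired_cosine_distances(X, Y)
    rhs = 0.5 * paired_euclidean_distances(X, Y) ** 2
    return np.abs(lhs - rhs).max()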
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
degree : int, default 3
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
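# Illustrative sketch: spot check of a single rbf_kernel entry against the
# closed form exp(-gamma * ||x - y||^2). The gamma value and points are
# arbitrary examples.
def _rbf_kernel_sketch(gamma=0.5):
    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    K = rbf_kernel(X, gamma=gamma)
    expected = np.exp(-gamma * 1.0)  # ||X[0] - X[1]||^2 == 1 for these points
    return K[0, 1], expected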
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T, dense_output=dense_output)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
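# Editor's illustration (not part of scikit-learn): chi2_kernel is just the
# exponential of gamma times the additive kernel above, so the two functions
# agree up to np.exp; a hypothetical sketch checking that relation.
def _demo_chi2_kernel():
    rng = np.random.RandomState(1)
    X = rng.rand(3, 4)
    Y = rng.rand(2, 4)
    gamma = 0.7
    K = chi2_kernel(X, Y, gamma=gamma)
    assert np.allclose(K, np.exp(gamma * additive_chi2_kernel(X, Y)))
    # On the diagonal of the self-kernel, k(x, x) = exp(0) = 1.
    assert np.allclose(np.diag(chi2_kernel(X, gamma=gamma)), 1.0)
    return K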
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
if n_jobs == 1:
# Special case to avoid picklability checks in delayed
return func(X, Y, **kwds)
# TODO: in some cases, backend='threading' may be appropriate
fd = delayed(func)
ret = Parallel(n_jobs=n_jobs, verbose=0)(
fd(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
def _pairwise_callable(X, Y, metric, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
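# Editor's illustration (not part of scikit-learn): a hypothetical sketch of
# _pairwise_callable above. When X is Y, only the upper triangle and the
# diagonal are evaluated and the result is mirrored, so a symmetric callable
# yields a symmetric matrix.
def _demo_pairwise_callable():
    X = np.array([[0.0], [1.0], [3.0]])
    out = _pairwise_callable(X, X, metric=lambda a, b: abs(a[0] - b[0]))
    assert np.allclose(out, out.T)
    assert np.allclose(np.diag(out), 0.0)
    return out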
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if n_jobs == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
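# Editor's illustration (not part of scikit-learn): three ways of calling
# pairwise_distances above on the same small array, using a scikit-learn
# metric, a scipy metric with a metric-specific keyword, and a callable.
def _demo_pairwise_distances():
    X = np.array([[0.0, 0.0], [3.0, 4.0]])
    D = pairwise_distances(X, metric="euclidean")
    assert np.allclose(D[0, 1], 5.0)
    # scipy.spatial.distance metrics accept extra keyword parameters.
    D_l1 = pairwise_distances(X, metric="minkowski", p=1)
    assert np.allclose(D_l1[0, 1], 7.0)
    # A callable receives two rows and returns a scalar distance.
    D_cheb = pairwise_distances(X, metric=lambda a, b: np.abs(a - b).max())
    assert np.allclose(D_cheb[0, 1], 4.0)
    return D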
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
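# Editor's illustration (not part of scikit-learn): pairwise_kernels above
# dispatches on the metric string, and with filter_params=True any keyword
# not listed in KERNEL_PARAMS for that kernel is silently dropped.
def _demo_pairwise_kernels():
    rng = np.random.RandomState(2)
    X = rng.rand(4, 3)
    assert np.allclose(pairwise_kernels(X, metric="linear"), np.dot(X, X.T))
    K_rbf = pairwise_kernels(X, metric="rbf", gamma=0.5)
    assert np.allclose(K_rbf, rbf_kernel(X, gamma=0.5))
    # 'degree' is not an rbf parameter; filter_params=True discards it rather
    # than forwarding it to rbf_kernel.
    K_filtered = pairwise_kernels(X, metric="rbf", filter_params=True,
                                  gamma=0.5, degree=3)
    assert np.allclose(K_filtered, K_rbf)
    return K_rbf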
| bsd-3-clause |
bowenliu16/deepchem | deepchem/dock/binding_pocket.py | 1 | 11449 | """
Computes putative binding pockets on protein.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "GPL"
import os
import tempfile
import numpy as np
import openbabel as ob
from rdkit import Chem
from subprocess import call
from scipy.spatial import ConvexHull
from deepchem.feat import hydrogenate_and_compute_partial_charges
from deepchem.feat.atomic_coordinates import AtomicCoordinates
from deepchem.feat.grid_featurizer import load_molecule
from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer
from deepchem.feat.fingerprints import CircularFingerprint
from deepchem.models.sklearn_models import SklearnModel
from deepchem.data.datasets import NumpyDataset
def extract_active_site(protein_file, ligand_file, cutoff=4):
"""Extracts a box for the active site."""
protein_coords = load_molecule(protein_file, add_hydrogens=False)[0]
ligand_coords = load_molecule(ligand_file, add_hydrogens=False)[0]
num_ligand_atoms = len(ligand_coords)
num_protein_atoms = len(protein_coords)
pocket_inds = []
pocket_atoms = set([])
for lig_atom_ind in range(num_ligand_atoms):
lig_atom = ligand_coords[lig_atom_ind]
for protein_atom_ind in range(num_protein_atoms):
protein_atom = protein_coords[protein_atom_ind]
if np.linalg.norm(lig_atom - protein_atom) < cutoff:
if protein_atom_ind not in pocket_atoms:
pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))
# Should be an array of size (n_pocket_atoms, 3)
pocket_atoms = list(pocket_atoms)
n_pocket_atoms = len(pocket_atoms)
pocket_coords = np.zeros((n_pocket_atoms, 3))
for ind, pocket_ind in enumerate(pocket_atoms):
pocket_coords[ind] = protein_coords[pocket_ind]
x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
return (((x_min, x_max), (y_min, y_max), (z_min, z_max)),
pocket_atoms, pocket_coords)
def compute_overlap(mapping, box1, box2):
"""Computes overlap between the two boxes.
Overlap is defined as % atoms of box1 in box2. Note that
overlap is not a symmetric measurement.
"""
atom1 = set(mapping[box1])
atom2 = set(mapping[box2])
return len(atom1.intersection(atom2))/float(len(atom1))
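# Editor's illustration (not part of DeepChem): compute_overlap above is
# asymmetric, the fraction of box1's atoms that also fall in box2, so swapping
# the arguments can give a different number.
def _demo_compute_overlap():
  box1 = ((0, 1), (0, 1), (0, 1))
  box2 = ((0, 2), (0, 2), (0, 2))
  mapping = {box1: [0, 1, 2, 3], box2: [2, 3]}
  assert compute_overlap(mapping, box1, box2) == 0.5  # 2 of box1's 4 atoms
  assert compute_overlap(mapping, box2, box1) == 1.0  # both of box2's atoms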
def get_all_boxes(coords, pad=5):
"""Get all pocket boxes for protein coords.
We pad all boxes the prescribed number of angstroms.
TODO(rbharath): It looks like this may perhaps be non-deterministic?
"""
hull = ConvexHull(coords)
boxes = []
for triangle in hull.simplices:
# coords[triangle, 0] gives the x-dimension of all triangle points
# Take transpose to make sure rows correspond to atoms.
points = np.array(
[coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T
# We voxelize so all grids have integral coordinates (convenience)
x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])
x_min, x_max = int(np.floor(x_min))-pad, int(np.ceil(x_max))+pad
y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])
y_min, y_max = int(np.floor(y_min))-pad, int(np.ceil(y_max))+pad
z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])
z_min, z_max = int(np.floor(z_min))-pad, int(np.ceil(z_max))+pad
boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))
return boxes
def boxes_to_atoms(atom_coords, boxes):
"""Maps each box to a list of atoms in that box.
TODO(rbharath): This does a num_atoms x num_boxes computations. Is
there a reasonable heuristic we can use to speed this up?
"""
mapping = {}
for box_ind, box in enumerate(boxes):
box_atoms = []
(x_min, x_max), (y_min, y_max), (z_min, z_max) = box
print("Handing box %d/%d" % (box_ind, len(boxes)))
for atom_ind in range(len(atom_coords)):
atom = atom_coords[atom_ind]
x_cont = x_min <= atom[0] and atom[0] <= x_max
y_cont = y_min <= atom[1] and atom[1] <= y_max
z_cont = z_min <= atom[2] and atom[2] <= z_max
if x_cont and y_cont and z_cont:
box_atoms.append(atom_ind)
mapping[box] = box_atoms
return mapping
def merge_boxes(box1, box2):
"""Merges two boxes."""
(x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1
(x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2
x_min = min(x_min1, x_min2)
y_min = min(y_min1, y_min2)
z_min = min(z_min1, z_min2)
x_max = max(x_max1, x_max2)
y_max = max(y_max1, y_max2)
z_max = max(z_max1, z_max2)
return ((x_min, x_max), (y_min, y_max), (z_min, z_max))
def merge_overlapping_boxes(mapping, boxes, threshold=.8):
"""Merge boxes which have an overlap greater than threshold.
TODO(rbharath): This merge code is terribly inelegant. It's also quadratic
in number of boxes. It feels like there ought to be an elegant divide and
conquer approach here. Figure out later...
"""
num_boxes = len(boxes)
outputs = []
for i in range(num_boxes):
box = boxes[0]
new_boxes = []
new_mapping = {}
# If overlap of box with previously generated output boxes, return
contained = False
for output_box in outputs:
# Carry forward mappings
new_mapping[output_box] = mapping[output_box]
if compute_overlap(mapping, box, output_box) == 1:
contained = True
if contained:
continue
# We know that box has at least one atom not in outputs
unique_box = True
for merge_box in boxes[1:]:
overlap = compute_overlap(mapping, box, merge_box)
if overlap < threshold:
new_boxes.append(merge_box)
new_mapping[merge_box] = mapping[merge_box]
else:
# Current box has been merged into box further down list.
# No need to output current box
unique_box = False
merged = merge_boxes(box, merge_box)
new_boxes.append(merged)
new_mapping[merged] = list(
set(mapping[box]).union(set(mapping[merge_box])))
if unique_box:
outputs.append(box)
new_mapping[box] = mapping[box]
boxes = new_boxes
mapping = new_mapping
return outputs, mapping
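# Editor's illustration (not part of DeepChem): the helpers above form a small
# pipeline, convex-hull boxes, then a box -> atom-index mapping, then a merge
# of heavily overlapping boxes. Random coordinates are used, so only the shape
# of the result is checked (boxes_to_atoms prints one progress line per box).
def _demo_box_pipeline():
  coords = np.random.RandomState(3).uniform(-10, 10, size=(20, 3))
  boxes = get_all_boxes(coords, pad=5)
  mapping = boxes_to_atoms(coords, boxes)
  merged_boxes, merged_mapping = merge_overlapping_boxes(mapping, boxes)
  assert len(merged_boxes) <= len(boxes)
  for box in merged_boxes:
    (x_min, x_max), (y_min, y_max), (z_min, z_max) = box
    assert x_min <= x_max and y_min <= y_max and z_min <= z_max
    assert box in merged_mapping
  return merged_boxes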
class BindingPocketFinder(object):
"""Abstract superclass for binding pocket detectors"""
def find_pockets(self, protein_file, ligand_file):
"""Finds potential binding pockets in proteins."""
raise NotImplementedError
class ConvexHullPocketFinder(BindingPocketFinder):
"""Implementation that uses convex hull of protein to find pockets.
Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf
"""
def __init__(self, pad=5):
self.pad = pad
def find_all_pockets(self, protein_file):
"""Find list of binding pockets on protein."""
# protein_coords is (N, 3) tensor
coords = load_molecule(protein_file, add_hydrogens=False)[0]
return get_all_boxes(coords, self.pad)
def find_pockets(self, protein_file, ligand_file):
"""Find list of suitable binding pockets on protein."""
protein_coords = load_molecule(protein_file, add_hydrogens=False)[0]
ligand_coords = load_molecule(ligand_file, add_hydrogens=False)[0]
boxes = get_all_boxes(protein_coords, self.pad)
mapping = boxes_to_atoms(protein_coords, boxes)
pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)
pocket_coords = []
for pocket in pockets:
atoms = pocket_atoms_map[pocket]
coords = np.zeros((len(atoms), 3))
for ind, atom in enumerate(atoms):
coords[ind] = protein_coords[atom]
pocket_coords.append(coords)
return pockets, pocket_atoms_map, pocket_coords
class RFConvexHullPocketFinder(BindingPocketFinder):
"""Uses pre-trained RF model + ConvexHulPocketFinder to select pockets."""
def __init__(self, pad=5):
self.pad = pad
self.convex_finder = ConvexHullPocketFinder(pad)
# Load binding pocket model
self.base_dir = tempfile.mkdtemp()
print("About to download trained model.")
# TODO(rbharath): Shift refined to full once trained.
call(("wget -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz").split())
call(("tar -zxvf pocket_random_refined_RF.tar.gz").split())
call(("mv pocket_random_refined_RF %s" % (self.base_dir)).split())
self.model_dir = os.path.join(self.base_dir, "pocket_random_refined_RF")
# Fit model on dataset
self.model = SklearnModel(model_dir=self.model_dir)
self.model.reload()
# Create featurizers
self.pocket_featurizer = BindingPocketFeaturizer()
self.ligand_featurizer = CircularFingerprint(size=1024)
def find_pockets(self, protein_file, ligand_file):
"""Compute features for a given complex
    TODO(rbharath): This has a lot of code overlap with
compute_binding_pocket_features in
examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor
to avoid code duplication.
"""
if not ligand_file.endswith(".sdf"):
raise ValueError("Only .sdf ligand files can be featurized.")
ligand_basename = os.path.basename(ligand_file).split(".")[0]
ligand_mol2 = os.path.join(
self.base_dir, ligand_basename + ".mol2")
# Write mol2 file for ligand
obConversion = ob.OBConversion()
conv_out = obConversion.SetInAndOutFormats(str("sdf"), str("mol2"))
ob_mol = ob.OBMol()
obConversion.ReadFile(ob_mol, str(ligand_file))
obConversion.WriteFile(ob_mol, str(ligand_mol2))
# Featurize ligand
mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
if mol is None:
return None, None
# Default for CircularFingerprint
n_ligand_features = 1024
ligand_features = self.ligand_featurizer.featurize([mol])
# Featurize pocket
pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(
protein_file, ligand_file)
n_pockets = len(pockets)
n_pocket_features = BindingPocketFeaturizer.n_features
features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
pocket_features = self.pocket_featurizer.featurize(
protein_file, pockets, pocket_atoms_map, pocket_coords)
# Note broadcast operation
features[:, :n_pocket_features] = pocket_features
features[:, n_pocket_features:] = ligand_features
dataset = NumpyDataset(X=features)
pocket_preds = self.model.predict(dataset)
pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))
# Find pockets which are active
active_pockets = []
active_pocket_atoms_map = {}
active_pocket_coords = []
for pocket_ind in range(len(pockets)):
#################################################### DEBUG
# TODO(rbharath): For now, using a weak cutoff. Fix later.
#if pocket_preds[pocket_ind] == 1:
if pocket_pred_proba[pocket_ind][1] > .15:
#################################################### DEBUG
pocket = pockets[pocket_ind]
active_pockets.append(pocket)
active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]
active_pocket_coords.append(pocket_coords[pocket_ind])
return active_pockets, active_pocket_atoms_map, active_pocket_coords
| gpl-3.0 |
mganeva/mantid | scripts/test/directtools/DirectToolsTest.py | 1 | 17930 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
# Set matplotlib backend to AGG before anything else. Otherwise some build servers
# need to have extra packages (tkinter) installed.
import matplotlib
matplotlib.use('AGG')
import directtools
from mantid.api import mtd
from mantid.simpleapi import (AddSampleLog, CloneWorkspace, ComputeIncoherentDOS, ConvertSpectrumAxis, CreateSampleWorkspace,
CreateWorkspace, DirectILLCollectData, DeleteWorkspace, DirectILLReduction, LoadILLTOF,
MoveInstrumentComponent, SetInstrumentParameter)
import numpy
import numpy.testing
import testhelpers
import unittest
class DirectToolsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
workspace = DirectILLCollectData('ILL/IN4/084446.nxs', EPPCreationMethod='Calculate EPP',
IncidentEnergyCalibration='Energy Calibration OFF',
FlatBkg='Flat Bkg OFF', Normalisation='Normalisation OFF',
StoreInADS=False)
cls._sqw = DirectILLReduction(workspace, OutputWorkspace='unused', StoreInADS=False)
def tearDown(self):
mtd.clear()
def _box2DSetup(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (3, 1))
vertAxis = numpy.array([-2, -1, 0, 1])
return xs, vertAxis
def test_box2D_defaults(self):
xs, vertAxis = self._box2DSetup()
vertAxis = numpy.array([-2, -1, 0, 1])
box = directtools.box2D(xs, vertAxis)
numpy.testing.assert_equal(xs[box], xs)
def test_box2D_horMin(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, horMin=0)
expected = numpy.tile(numpy.array([0, 2, 4, 5]), (3, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_horMax(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, horMax=5)
expected = numpy.tile(numpy.array([-1, 0, 2, 4]), (3, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_vertMin(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, vertMin=-1)
expected = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (2, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_box2D_vertMax(self):
xs, vertAxis = self._box2DSetup()
box = directtools.box2D(xs, vertAxis, vertMax=-1)
expected = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), (1, 1))
numpy.testing.assert_equal(xs[box], expected)
def test_configurematplotlib(self):
defaultParams = directtools.defaultrcparams()
directtools._configurematplotlib(defaultParams)
for key in defaultParams:
self.assertTrue(key in matplotlib.rcParams)
self.assertEqual(matplotlib.rcParams[key], defaultParams[key])
def test_defaultrcParams(self):
result = directtools.defaultrcparams()
self.assertEqual(result, {'legend.numpoints': 1})
def test_dynamicsusceptibility(self):
xs = numpy.array([-1, 0, 1])
ys = numpy.array([1, 1])
vertX = numpy.array([-1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='DeltaE', VerticalAxisUnit='MomentumTransfer', VerticalAxisValues=vertX,
StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100.)
self.assertEqual(wsOut.YUnitLabel(), 'Dynamic susceptibility')
xs = numpy.array([0, 1, 0, 1])
ys = numpy.array([1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=2, UnitX='MomentumTransfer', VerticalAxisUnit='DeltaE', VerticalAxisValues=vertX,
StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100.)
self.assertEqual(wsOut.YUnitLabel(), 'Dynamic susceptibility')
def test_dynamicsusceptibility_removesingularity(self):
xs = numpy.array([-0.7, -0.4, -0.1, 0.2, 0.5])
ys = numpy.array([2, 2, 2, 2])
es = numpy.sqrt(ys)
vertX = numpy.array([-1, 1])
ws = CreateWorkspace(DataX=xs, DataY=ys, DataE=es, NSpec=1, UnitX='DeltaE', VerticalAxisUnit='MomentumTransfer',
VerticalAxisValues=vertX, StoreInADS=False)
wsOut = directtools.dynamicsusceptibility(ws, 100., zeroEnergyEpsilon=0.13)
numpy.testing.assert_equal(wsOut.readX(0), xs)
outYs = wsOut.readY(0)
outEs = wsOut.readE(0)
self.assertEqual(outYs[2], 0.)
self.assertEqual(outEs[2], 0.)
def test_mantidsubplotsetup(self):
result = directtools._mantidsubplotsetup()
self.assertEqual(result, {'projection': 'mantid'})
def _nanminmaxSetup(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), 3)
ys = numpy.linspace(-5, 3, 4 * 3)
vertAxis = numpy.array([-3, -1, 2, 4])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=3, VerticalAxisUnit='Degrees', VerticalAxisValues=vertAxis, StoreInADS=False)
return ws
def test_nanminmax_defaults(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_horMin(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, horMin=0)
self.assertEqual(cMin, ys[0, 1])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_horMax(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, horMax=4)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[2, -2])
def test_nanminmax_vertMin(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, vertMin=-1)
self.assertEqual(cMin, ys[1, 0])
self.assertEqual(cMax, ys[2, -1])
def test_nanminmax_vertMax(self):
ws = self._nanminmaxSetup()
ys = ws.extractY()
cMin, cMax = directtools.nanminmax(ws, vertMax=2)
self.assertEqual(cMin, ys[0, 0])
self.assertEqual(cMax, ys[1, -1])
def test_plotconstE_nonListArgsExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E' : -1.,
'dE' : 1.5
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_wsListExecutes(self):
kwargs = {
'workspaces': [self._sqw, self._sqw],
'E' : -2.,
'dE' : 1.5,
'style' : 'l'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_EListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E' : [-3., 4.],
'dE' : 1.5,
'style' : 'm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_dEListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'E' : 3.,
'dE' : [1.5, 15.],
'style' : 'lm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
def test_plotconstE_loglog(self):
kwargs = {
'workspaces': self._sqw,
'E' : -10.,
'dE' : 1.5,
'xscale': 'log',
'yscale': 'log'
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstE, **kwargs)
self.assertEquals(axes.get_xscale(), 'log')
self.assertEquals(axes.get_yscale(), 'log')
def test_plotconstQ_nonListArgsExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q' : 2.3,
'dQ' : 0.3
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_wsListExecutes(self):
kwargs = {
'workspaces': [self._sqw, self._sqw],
'Q' : 2.4,
'dQ' : 0.42,
'style' : 'l'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_QListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q' : [1.8, 3.1],
'dQ' : 0.32,
'style' : 'm'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_dQListExecutes(self):
kwargs = {
'workspaces': self._sqw,
'Q' : 1.9,
'dQ' : [0.2, 0.4],
'style' : 'ml'
}
testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
def test_plotconstQ_loglog(self):
kwargs = {
'workspaces': self._sqw,
'Q' : 2.6,
'dQ' : 0.1,
'xscale': 'log',
'yscale': 'log'
}
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotconstQ, **kwargs)
self.assertEquals(axes.get_xscale(), 'log')
self.assertEquals(axes.get_yscale(), 'log')
def test_plotconstE_and_plotconstQ_plot_equal_value_at_crossing(self):
Q = 2.512
figure, axes, cuts = directtools.plotconstQ(self._sqw, Q, 0.01)
lineDataQ = axes.get_lines()[0].get_data()
E = 2.2
figure, axes, cuts = directtools.plotconstE(self._sqw, E, 0.01)
lineDataE = axes.get_lines()[0].get_data()
indexE = numpy.argmin(numpy.abs(lineDataQ[0] - E))
indexQ = numpy.argmin(numpy.abs(lineDataE[0] - Q))
self.assertEquals(lineDataQ[1][indexE], lineDataE[1][indexQ])
def test_plotcuts_keepCutWorkspaces(self):
kwargs = {
'direction' : 'Vertical',
'workspaces' : self._sqw,
'cuts' : 1.9,
'widths': 0.8,
'quantity': 'TOF',
'unit': 'microseconds',
'keepCutWorkspaces': True
}
self.assertEquals(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEquals(len(cuts), 1)
self.assertEquals(mtd.size(), 1)
def test_plotcuts_doNotKeepCutWorkspaces(self):
kwargs = {
'direction' : 'Vertical',
'workspaces' : self._sqw,
'cuts' : 2.0,
'widths': 0.7,
'quantity': 'TOF',
'unit': 'microseconds',
'keepCutWorkspaces': False
}
self.assertEquals(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEquals(len(cuts), 0)
self.assertEquals(mtd.size(), 0)
def test_plotcuts_loglog(self):
kwargs = {
'direction' : 'Vertical',
'workspaces' : self._sqw,
'cuts' : 2.1,
'widths': 0.6,
'quantity': 'TOF',
'unit': 'microseconds',
'xscale': 'log',
'yscale': 'log'
}
self.assertEquals(mtd.size(), 0)
figure, axes, cuts = testhelpers.assertRaisesNothing(self, directtools.plotcuts, **kwargs)
self.assertEquals(axes.get_xscale(), 'log')
self.assertEquals(axes.get_yscale(), 'log')
def test_plotprofiles_noXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEquals(axes.get_xlabel(), '')
self.assertEquals(axes.get_ylabel(), r'$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1])/2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_DeltaEXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='DeltaE', StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEquals(axes.get_xlabel(), 'Energy (meV)')
self.assertEquals(axes.get_ylabel(), r'$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1])/2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_MomentumTransferXUnitsExecutes(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='MomentumTransfer', StoreInADS=False)
kwargs = {'workspaces': ws}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEquals(axes.get_xlabel(), u'$Q$ (\u00c5$^{-1}$)')
self.assertEquals(axes.get_ylabel(), '$S(Q,E)$')
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[0], (xs[1:] + xs[:-1])/2)
numpy.testing.assert_equal(axes.get_lines()[0].get_data()[1], ys)
def test_plotprofiles_loglog(self):
xs = numpy.linspace(-3., 10., 12)
ys = numpy.tile(1., len(xs) - 1)
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=1, UnitX='MomentumTransfer', StoreInADS=False)
kwargs = {'workspaces': ws, 'xscale': 'log', 'yscale': 'log'}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotprofiles, **kwargs)
self.assertEquals(axes.get_xscale(), 'log')
self.assertEquals(axes.get_yscale(), 'log')
def test_plotDOS_PlotSingle(self):
ws = CreateSampleWorkspace(NumBanks=1, XUnit='DeltaE', XMin=-12., XMax=12., BinWidth=0.2, StoreInADS=False)
MoveInstrumentComponent(ws, 'bank1', X=-0.5, StoreInADS=False)
ws = ConvertSpectrumAxis(ws, 'Theta', 'Direct', 14., StoreInADS=False)
SetInstrumentParameter(ws, ParameterName='deltaE-mode', Value='direct', StoreInADS=False)
AddSampleLog(ws, LogName='Ei', LogText=str(14.), LogType='Number', LogUnit='meV', StoreInADS=False)
stw = ComputeIncoherentDOS(ws, StoreInADS=False)
kwargs = {'workspaces': stw}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotDOS, **kwargs)
self.assertEquals(axes.get_xlabel(), u'Energy transfer ($meV$)')
self.assertEquals(axes.get_ylabel(), u'$g(E)$')
def test_plotDOS_PlotMultiple(self):
ws = CreateSampleWorkspace(NumBanks=1, XUnit='DeltaE', XMin=-12., XMax=12., BinWidth=0.2, StoreInADS=False)
MoveInstrumentComponent(ws, 'bank1', X=-0.5, StoreInADS=False)
ws = ConvertSpectrumAxis(ws, 'Theta', 'Direct', 14., StoreInADS=False)
SetInstrumentParameter(ws, ParameterName='deltaE-mode', Value='direct', StoreInADS=False)
AddSampleLog(ws, LogName='Ei', LogText=str(14.), LogType='Number', LogUnit='meV', StoreInADS=False)
stw = ComputeIncoherentDOS(ws)
kwargs = {'workspaces': [stw, 'stw']}
figure, axes = testhelpers.assertRaisesNothing(self, directtools.plotDOS, **kwargs)
self.assertEquals(axes.get_xlabel(), u'Energy transfer ($meV$)')
self.assertEquals(axes.get_ylabel(), u'$g(E)$')
def test_plotSofQW(self):
wsName = 'ws'
CloneWorkspace(self._sqw, OutputWorkspace=wsName)
kwargs = {'workspace': wsName}
testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
DeleteWorkspace(wsName)
kwargs = {'workspace': self._sqw}
testhelpers.assertRaisesNothing(self, directtools.plotSofQW, **kwargs)
def test_subplots(self):
testhelpers.assertRaisesNothing(self, directtools.subplots)
def test_validQ(self):
xs = numpy.tile(numpy.array([-1, 0, 2, 4, 5]), 3)
nPoints = 4
ys = numpy.tile(numpy.zeros(nPoints), 3)
ys[nPoints] = numpy.nan
ys[2 * nPoints - 1] = numpy.nan
vertAxis = numpy.array([-3, -1, 2, 4])
ws = CreateWorkspace(DataX=xs, DataY=ys, NSpec=3, VerticalAxisUnit='Degrees', VerticalAxisValues=vertAxis, StoreInADS=False)
qMin, qMax = directtools.validQ(ws, -2.5)
self.assertEqual(qMin, xs[0])
self.assertEqual(qMax, xs[-1])
qMin, qMax = directtools.validQ(ws, 0)
self.assertEqual(qMin, xs[1])
self.assertEqual(qMax, xs[-2])
def test_wsreport(self):
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': self._sqw})
in5WS = LoadILLTOF('ILL/IN5/104007.nxs', StoreInADS=False)
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': in5WS})
in6WS = LoadILLTOF('ILL/IN6/164192.nxs', StoreInADS=False)
testhelpers.assertRaisesNothing(self, directtools.wsreport, **{'workspace': in6WS})
def test_SampleLogs(self):
ws = CreateSampleWorkspace(NumBanks=1, BankPixelWidth=1)
ws.mutableRun().addProperty('a', 7, True)
ws.mutableRun().addProperty('b.c', 13, True)
logs = directtools.SampleLogs(ws)
self.assertTrue(hasattr(logs, 'a'))
self.assertEqual(logs.a, 7)
self.assertTrue(hasattr(logs, 'b'))
self.assertTrue(hasattr(logs.b, 'c'))
self.assertEqual(logs.b.c, 13)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
petebachant/seaborn | seaborn/categorical.py | 19 | 102299 | from __future__ import division
from textwrap import dedent
import colorsys
import numpy as np
from scipy import stats
import pandas as pd
from pandas.core.series import remove_na
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
from .external.six import string_types
from .external.six.moves import range
from . import utils
from .utils import desaturate, iqr, categorical_order
from .algorithms import bootstrap
from .palettes import color_palette, husl_palette, light_palette
from .axisgrid import FacetGrid, _facet_docs
class _CategoricalPlotter(object):
width = .8
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
            # We also won't get axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except ValueError:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for input in [x, y, hue, units]:
if isinstance(input, string_types):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = np.asarray(grouped_vals.get_group(g))
except KeyError:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
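    # Editor's illustration (not part of seaborn): the groupby-with-explicit-
    # order behaviour of _group_longform above, reproduced standalone. Levels
    # missing from the data come back as empty arrays so `order` is preserved.
    @staticmethod
    def _demo_group_longform():
        vals = pd.Series([1.0, 2.0, 3.0, 4.0], name="value")
        grouper = pd.Series(["a", "b", "a", "b"])
        grouped = vals.groupby(grouper)
        out = []
        for level in ["b", "a", "c"]:  # explicit order; "c" has no data
            try:
                out.append(np.asarray(grouped.get_group(level)))
            except KeyError:
                out.append(np.array([]))
        assert [len(g) for g in out] == [2, 2, 0]
        return out, vals.name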
def establish_colors(self, color, palette, saturation):
"""Get a list of colors for the main component of the plots."""
if self.hue_names is None:
n_colors = len(self.plot_data)
else:
n_colors = len(self.hue_names)
# Determine the main colors
if color is None and palette is None:
# Determine whether the current palette will have enough values
# If not, we'll default to the husl palette so each is distinct
current_palette = mpl.rcParams["axes.color_cycle"]
if n_colors <= len(current_palette):
colors = color_palette(n_colors=n_colors)
else:
colors = husl_palette(n_colors, l=.7)
elif palette is None:
# When passing a specific color, the interpretation depends
# on whether there is a hue variable or not.
# If so, we will make a blend palette so that the different
# levels have some amount of variation.
if self.hue_names is None:
colors = [color] * n_colors
else:
colors = light_palette(color, n_colors)
else:
# Let `palette` be a dict mapping level to color
if isinstance(palette, dict):
if self.hue_names is None:
levels = self.group_names
else:
levels = self.hue_names
palette = [palette[l] for l in levels]
colors = color_palette(palette, n_colors)
        # Convert the colors to a common rgb representation
colors = [mpl.colors.colorConverter.to_rgb(c) for c in colors]
# Desaturate a bit because these are patches
if saturation < 1:
colors = [desaturate(c, saturation) for c in colors]
# Determine the gray color to use for the lines framing the plot
light_vals = [colorsys.rgb_to_hls(*c)[1] for c in colors]
l = min(light_vals) * .6
gray = (l, l, l)
# Assign object attributes
self.colors = colors
self.gray = gray
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
                # Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except AttributeError:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
def is_not_numeric(s):
try:
np.asarray(s, dtype=np.float)
except ValueError:
return True
return False
no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
if is_categorical(x):
raise ValueError(no_numeric)
else:
return "h"
elif is_not_numeric(y):
if is_not_numeric(x):
raise ValueError(no_numeric)
else:
return "h"
else:
return "v"
@property
def hue_offsets(self):
"""A list of center positions for plots when hue nesting is used."""
n_levels = len(self.hue_names)
each_width = self.width / n_levels
offsets = np.linspace(0, self.width - each_width, n_levels)
offsets -= offsets.mean()
return offsets
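    # Editor's illustration (not part of seaborn): the hue_offsets formula
    # above, reproduced standalone. With width=.8 and three hue levels the
    # violin/box centers sit at -0.2667, 0 and +0.2667 around each group
    # position, i.e. the group's slot is split into equal thirds.
    @staticmethod
    def _demo_hue_offsets(width=.8, n_levels=3):
        each_width = width / n_levels
        offsets = np.linspace(0, width - each_width, n_levels)
        offsets -= offsets.mean()
        return offsets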
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names) * .98
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size a roundabout way to maintain
                # compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except TypeError: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
def add_legend_data(self, ax, color, label):
"""Add a dummy patch object so we can get legend data."""
rect = plt.Rectangle([0, 0], 0, 0,
linewidth=self.linewidth / 2,
edgecolor=self.gray,
facecolor=color,
label=label)
ax.add_patch(rect)
class _BoxPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.width = width
self.fliersize = fliersize
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def draw_boxplot(self, ax, kws):
"""Use matplotlib to draw a boxplot on an Axes."""
vert = self.orient == "v"
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
                # Handle case where there is no data at this level
if group_data.size == 0:
continue
# Draw a single box or a set of boxes
# with a single level of grouping
box_data = remove_na(group_data)
# Handle case where there is no non-null data
if box_data.size == 0:
continue
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[i],
widths=self.width,
**kws)
color = self.colors[i]
self.restyle_boxplot(artist_dict, color, kws)
else:
# Draw nested groups of boxes
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
# Add a legend for this hue level
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
                    # Handle case where there is no data at this level
if group_data.size == 0:
continue
box_data = remove_na(group_data[hue_mask])
# Handle case where there is no non-null data
if box_data.size == 0:
continue
center = i + offsets[j]
artist_dict = ax.boxplot(box_data,
vert=vert,
patch_artist=True,
positions=[center],
widths=self.nested_width,
**kws)
self.restyle_boxplot(artist_dict, self.colors[j], kws)
# Add legend data, but just for one set of boxes
def restyle_boxplot(self, artist_dict, color, kws):
"""Take a drawn matplotlib boxplot and make it look nice."""
for box in artist_dict["boxes"]:
box.update(dict(color=color,
zorder=.9,
edgecolor=self.gray,
linewidth=self.linewidth))
box.update(kws.get("boxprops", {}))
for whisk in artist_dict["whiskers"]:
whisk.update(dict(color=self.gray,
linewidth=self.linewidth,
linestyle="-"))
whisk.update(kws.get("whiskerprops", {}))
for cap in artist_dict["caps"]:
cap.update(dict(color=self.gray,
linewidth=self.linewidth))
cap.update(kws.get("capprops", {}))
for med in artist_dict["medians"]:
med.update(dict(color=self.gray,
linewidth=self.linewidth))
med.update(kws.get("medianprops", {}))
for fly in artist_dict["fliers"]:
fly.update(dict(color=self.gray,
marker="d",
markeredgecolor=self.gray,
markersize=self.fliersize))
fly.update(kws.get("flierprops", {}))
def plot(self, ax, boxplot_kws):
"""Make the plot."""
self.draw_boxplot(ax, boxplot_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _ViolinPlotter(_CategoricalPlotter):
def __init__(self, x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation):
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, saturation)
self.estimate_densities(bw, cut, scale, scale_hue, gridsize)
self.gridsize = gridsize
self.width = width
if inner is not None:
if not any([inner.startswith("quart"),
inner.startswith("box"),
inner.startswith("stick"),
inner.startswith("point")]):
err = "Inner style '{}' not recognized".format(inner)
raise ValueError(err)
self.inner = inner
if split and self.hue_names is not None and len(self.hue_names) != 2:
raise ValueError("Cannot use `split` with more than 2 hue levels.")
self.split = split
if linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
self.linewidth = linewidth
def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):
"""Find the support and density for all of the data."""
# Initialize data structures to keep track of plotting data
if self.hue_names is None:
support = []
density = []
counts = np.zeros(len(self.plot_data))
max_density = np.zeros(len(self.plot_data))
else:
support = [[] for _ in self.plot_data]
density = [[] for _ in self.plot_data]
size = len(self.group_names), len(self.hue_names)
counts = np.zeros(size)
max_density = np.zeros(size)
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
# Strip missing datapoints
kde_data = remove_na(group_data)
# Handle special case of no data at this level
if kde_data.size == 0:
support.append(np.array([]))
density.append(np.array([1.]))
counts[i] = 0
max_density[i] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support.append(np.unique(kde_data))
density.append(np.array([1.]))
counts[i] = 1
max_density[i] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_i = self.kde_support(kde_data, bw_used, cut, gridsize)
density_i = kde.evaluate(support_i)
# Update the data structures with these results
support.append(support_i)
density.append(density_i)
counts[i] = kde_data.size
max_density[i] = density_i.max()
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
# Handle special case of no data at this category level
if not group_data.size:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Select out the observations for this hue level
hue_mask = self.plot_hues[i] == hue_level
# Strip missing datapoints
kde_data = remove_na(group_data[hue_mask])
# Handle special case of no data at this level
if kde_data.size == 0:
support[i].append(np.array([]))
density[i].append(np.array([1.]))
counts[i, j] = 0
max_density[i, j] = 0
continue
# Handle special case of a single unique datapoint
elif np.unique(kde_data).size == 1:
support[i].append(np.unique(kde_data))
density[i].append(np.array([1.]))
counts[i, j] = 1
max_density[i, j] = 0
continue
# Fit the KDE and get the used bandwidth size
kde, bw_used = self.fit_kde(kde_data, bw)
# Determine the support grid and get the density over it
support_ij = self.kde_support(kde_data, bw_used,
cut, gridsize)
density_ij = kde.evaluate(support_ij)
# Update the data structures with these results
support[i].append(support_ij)
density[i].append(density_ij)
counts[i, j] = kde_data.size
max_density[i, j] = density_ij.max()
# Scale the height of the density curve.
# For a violinplot the density is non-quantitative.
# The objective here is to scale the curves relative to 1 so that
# they can be multiplied by the width parameter during plotting.
if scale == "area":
self.scale_area(density, max_density, scale_hue)
elif scale == "width":
self.scale_width(density)
elif scale == "count":
self.scale_count(density, counts, scale_hue)
else:
raise ValueError("scale method '{}' not recognized".format(scale))
# Set object attributes that will be used while plotting
self.support = support
self.density = density
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except TypeError:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
        # To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
def kde_support(self, x, bw, cut, gridsize):
"""Define a grid of support for the violin."""
support_min = x.min() - bw * cut
support_max = x.max() + bw * cut
return np.linspace(support_min, support_max, gridsize)
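    # Editor's illustration (not part of seaborn): how fit_kde and kde_support
    # above cooperate. The KDE scale factor times the sample standard deviation
    # gives the kernel bandwidth, and the support grid extends `cut` bandwidths
    # past the observed data range.
    @staticmethod
    def _demo_kde_support(cut=2, gridsize=100):
        x = np.asarray([1.0, 2.0, 2.5, 3.0, 5.0])
        kde = stats.gaussian_kde(x)
        bw = kde.factor * x.std(ddof=1)
        grid = np.linspace(x.min() - bw * cut, x.max() + bw * cut, gridsize)
        density = kde.evaluate(grid)
        assert grid[0] < x.min() and grid[-1] > x.max()
        assert density.shape == (gridsize,)
        return grid, density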
def scale_area(self, density, max_density, scale_hue):
"""Scale the relative area under the KDE curve.
This essentially preserves the "standard" KDE scaling, but the
resulting maximum density will be 1 so that the curve can be
properly multiplied by the violin width.
"""
if self.hue_names is None:
for d in density:
if d.size > 1:
d /= max_density.max()
else:
for i, group in enumerate(density):
for d in group:
if scale_hue:
max = max_density[i].max()
else:
max = max_density.max()
if d.size > 1:
d /= max
def scale_width(self, density):
"""Scale each density curve to the same height."""
if self.hue_names is None:
for d in density:
d /= d.max()
else:
for group in density:
for d in group:
d /= d.max()
def scale_count(self, density, counts, scale_hue):
"""Scale each density curve by the number of observations."""
if self.hue_names is None:
for count, d in zip(counts, density):
d /= d.max()
d *= count / counts.max()
else:
for i, group in enumerate(density):
for j, d in enumerate(group):
count = counts[i, j]
if scale_hue:
scaler = count / counts[i].max()
else:
scaler = count / counts.max()
d /= d.max()
d *= scaler
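    # Editorial note (not part of the original module): a small numeric
    # illustration of the three scaling modes implemented above, assuming two
    # groups whose raw KDE maxima are 0.2 and 0.4 and whose counts are 10 and 40:
    #   * "area":  every curve is divided by the global maximum (0.4), so the
    #              denser group peaks at 1 and relative areas are preserved.
    #   * "width": each curve is divided by its own maximum, so both peak at 1.
    #   * "count": after the "width" step, each curve is multiplied by
    #              count / max_count, i.e. 10/40 = 0.25 and 40/40 = 1.0.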
@property
def dwidth(self):
if self.hue_names is None:
return self.width / 2
elif self.split:
return self.width / 2
else:
return self.width / (2 * len(self.hue_names))
def draw_violins(self, ax):
"""Draw the violins onto `ax`."""
fill_func = ax.fill_betweenx if self.orient == "v" else ax.fill_between
for i, group_data in enumerate(self.plot_data):
kws = dict(edgecolor=self.gray, linewidth=self.linewidth)
# Option 1: we have a single level of grouping
# --------------------------------------------
if self.plot_hues is None:
support, density = self.support[i], self.density[i]
# Handle special case of no observations in this bin
if support.size == 0:
continue
# Handle special case of a single observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
self.draw_single_observation(ax, i, val, d)
continue
# Draw the violin for this group
grid = np.ones(self.gridsize) * i
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
color=self.colors[i],
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data, support, density, i)
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data, support, density, i)
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data, support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2: we have nested grouping by a hue variable
# ---------------------------------------------------
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
support, density = self.support[i][j], self.density[i][j]
kws["color"] = self.colors[j]
# Add legend data, but just for one set of violins
if not i:
self.add_legend_data(ax, self.colors[j], hue_level)
# Handle the special case where we have no observations
if support.size == 0:
continue
# Handle the special case where we have one observation
elif support.size == 1:
val = np.asscalar(support)
d = np.asscalar(density)
if self.split:
d = d / 2
at_group = i + offsets[j]
self.draw_single_observation(ax, at_group, val, d)
continue
# Option 2a: we are drawing a single split violin
# -----------------------------------------------
if self.split:
grid = np.ones(self.gridsize) * i
if j:
fill_func(support,
grid,
grid + density * self.dwidth,
**kws)
else:
fill_func(support,
grid - density * self.dwidth,
grid,
**kws)
# Draw the interior representation of the data
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw quartile lines
if self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density, i,
["left", "right"][j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density, i,
["left", "right"][j])
# The box and point interior plots are drawn for
# all data at the group level, so we just do that once
if not j:
continue
# Get the whole vector for this group level
violin_data = remove_na(group_data)
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density, i)
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i)
# Option 2b: we are drawing full nested violins
# -----------------------------------------------
else:
grid = np.ones(self.gridsize) * (i + offsets[j])
fill_func(support,
grid - density * self.dwidth,
grid + density * self.dwidth,
**kws)
# Draw the interior representation
if self.inner is None:
continue
# Get a nan-free vector of datapoints
hue_mask = self.plot_hues[i] == hue_level
violin_data = remove_na(group_data[hue_mask])
# Draw box and whisker information
if self.inner.startswith("box"):
self.draw_box_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw quartile lines
elif self.inner.startswith("quart"):
self.draw_quartiles(ax, violin_data,
support, density,
i + offsets[j])
# Draw stick observations
elif self.inner.startswith("stick"):
self.draw_stick_lines(ax, violin_data,
support, density,
i + offsets[j])
# Draw point observations
elif self.inner.startswith("point"):
self.draw_points(ax, violin_data, i + offsets[j])
def draw_single_observation(self, ax, at_group, at_quant, density):
"""Draw a line to mark a single observation."""
d_width = density * self.dwidth
if self.orient == "v":
ax.plot([at_group - d_width, at_group + d_width],
[at_quant, at_quant],
color=self.gray,
linewidth=self.linewidth)
else:
ax.plot([at_quant, at_quant],
[at_group - d_width, at_group + d_width],
color=self.gray,
linewidth=self.linewidth)
def draw_box_lines(self, ax, data, support, density, center):
"""Draw boxplot information at center of the density."""
# Compute the boxplot statistics
q25, q50, q75 = np.percentile(data, [25, 50, 75])
whisker_lim = 1.5 * iqr(data)
h1 = np.min(data[data >= (q25 - whisker_lim)])
h2 = np.max(data[data <= (q75 + whisker_lim)])
# Draw a boxplot using lines and a point
if self.orient == "v":
ax.plot([center, center], [h1, h2],
linewidth=self.linewidth,
color=self.gray)
ax.plot([center, center], [q25, q75],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(center, q50,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
else:
ax.plot([h1, h2], [center, center],
linewidth=self.linewidth,
color=self.gray)
ax.plot([q25, q75], [center, center],
linewidth=self.linewidth * 3,
color=self.gray)
ax.scatter(q50, center,
zorder=3,
color="white",
edgecolor=self.gray,
s=np.square(self.linewidth * 2))
def draw_quartiles(self, ax, data, support, density, center, split=False):
"""Draw the quartiles as lines at width of density."""
q25, q50, q75 = np.percentile(data, [25, 50, 75])
self.draw_to_density(ax, center, q25, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
self.draw_to_density(ax, center, q50, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 3] * 2)
self.draw_to_density(ax, center, q75, support, density, split,
linewidth=self.linewidth,
dashes=[self.linewidth * 1.5] * 2)
def draw_points(self, ax, data, center):
"""Draw individual observations as points at middle of the violin."""
kws = dict(s=np.square(self.linewidth * 2),
c=self.gray,
edgecolor=self.gray)
grid = np.ones(len(data)) * center
if self.orient == "v":
ax.scatter(grid, data, **kws)
else:
ax.scatter(data, grid, **kws)
def draw_stick_lines(self, ax, data, support, density,
center, split=False):
"""Draw individual observations as sticks at width of density."""
for val in data:
self.draw_to_density(ax, center, val, support, density, split,
linewidth=self.linewidth * .5)
def draw_to_density(self, ax, center, val, support, density, split, **kws):
"""Draw a line orthogonal to the value axis at width of density."""
idx = np.argmin(np.abs(support - val))
width = self.dwidth * density[idx] * .99
kws["color"] = self.gray
if self.orient == "v":
if split == "left":
ax.plot([center - width, center], [val, val], **kws)
elif split == "right":
ax.plot([center, center + width], [val, val], **kws)
else:
ax.plot([center - width, center + width], [val, val], **kws)
else:
if split == "left":
ax.plot([val, val], [center - width, center], **kws)
elif split == "right":
ax.plot([val, val], [center, center + width], **kws)
else:
ax.plot([val, val], [center - width, center + width], **kws)
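    # Editorial note (not part of the original module): ``draw_to_density``
    # reads the violin's half-width at ``val`` by locating the nearest point of
    # the support grid with ``np.argmin(np.abs(support - val))``. A minimal
    # sketch of that lookup:
    #
    #     import numpy as np
    #     support = np.linspace(0, 10, 101)
    #     density = np.exp(-(support - 5) ** 2)
    #     idx = np.argmin(np.abs(support - 3.14))  # nearest grid point to 3.14
    #     half_width = density[idx]                # height used for the line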
def plot(self, ax):
"""Make the violin plot."""
self.draw_violins(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _StripPlotter(_CategoricalPlotter):
"""1-d scatterplot with categorical organization."""
def __init__(self, x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient, order, hue_order)
self.establish_colors(color, palette, 1)
# Set object attributes
self.split = split
self.width = .8
if jitter == 1: # Use a good default for `jitter = True`
jlim = 0.1
else:
jlim = float(jitter)
if self.hue_names is not None and split:
jlim /= len(self.hue_names)
self.jitterer = stats.uniform(-jlim, jlim * 2).rvs
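        # Editorial note (not part of the original module):
        # ``stats.uniform(loc, scale)`` is supported on ``[loc, loc + scale]``,
        # so ``stats.uniform(-jlim, jlim * 2).rvs(n)`` draws ``n`` jitter
        # offsets uniformly from ``[-jlim, +jlim]``, centered on the category
        # position along the categorical axis.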
def draw_stripplot(self, ax, kws):
"""Draw the points onto `ax`."""
# Set the default zorder to 2.1, so that the points
# will be drawn on top of line elements (like in a boxplot)
kws.setdefault("zorder", 2.1)
for i, group_data in enumerate(self.plot_data):
if self.plot_hues is None:
# Determine the positions of the points
strip_data = remove_na(group_data)
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[i]
# Draw the plot
if self.orient == "v":
ax.scatter(i + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, i + jitter, **kws)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
hue_mask = self.plot_hues[i] == hue_level
if not hue_mask.any():
continue
# Determine the positions of the points
strip_data = remove_na(group_data[hue_mask])
pos = i + offsets[j] if self.split else i
jitter = self.jitterer(len(strip_data))
kws["color"] = self.colors[j]
# Only label one set of plots
if i:
kws.pop("label", None)
else:
kws["label"] = hue_level
# Draw the plot
if self.orient == "v":
ax.scatter(pos + jitter, strip_data, **kws)
else:
ax.scatter(strip_data, pos + jitter, **kws)
def plot(self, ax, kws):
"""Make the plot."""
self.draw_stripplot(ax, kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _SwarmPlotter(_BoxPlotter):
def __init__(self):
pass
def plot(self, ax):
pass
class _CategoricalStatPlotter(_CategoricalPlotter):
@property
def nested_width(self):
"""A float with the width of plot elements when hue nesting is used."""
return self.width / len(self.hue_names)
def estimate_statistic(self, estimator, ci, n_boot):
if self.hue_names is None:
statistic = []
confint = []
else:
statistic = [[] for _ in self.plot_data]
confint = [[] for _ in self.plot_data]
for i, group_data in enumerate(self.plot_data):
# Option 1: we have a single layer of grouping
# --------------------------------------------
if self.plot_hues is None:
if self.plot_units is None:
stat_data = remove_na(group_data)
unit_data = None
else:
unit_data = self.plot_units[i]
have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)
stat_data = group_data[have]
unit_data = unit_data[have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic.append(np.nan)
else:
statistic.append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint.append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint.append(utils.ci(boots, ci))
# Option 2: we are grouping by a hue layer
# ----------------------------------------
else:
for j, hue_level in enumerate(self.hue_names):
if not self.plot_hues[i].size:
statistic[i].append(np.nan)
if ci is not None:
confint[i].append((np.nan, np.nan))
continue
hue_mask = self.plot_hues[i] == hue_level
if self.plot_units is None:
stat_data = remove_na(group_data[hue_mask])
unit_data = None
else:
group_units = self.plot_units[i]
have = pd.notnull(
np.c_[group_data, group_units]
).all(axis=1)
stat_data = group_data[hue_mask & have]
unit_data = group_units[hue_mask & have]
# Estimate a statistic from the vector of data
if not stat_data.size:
statistic[i].append(np.nan)
else:
statistic[i].append(estimator(stat_data))
# Get a confidence interval for this estimate
if ci is not None:
if stat_data.size < 2:
confint[i].append([np.nan, np.nan])
continue
boots = bootstrap(stat_data, func=estimator,
n_boot=n_boot,
units=unit_data)
confint[i].append(utils.ci(boots, ci))
# Save the resulting values for plotting
self.statistic = np.array(statistic)
self.confint = np.array(confint)
# Rename the value label to reflect the estimation
if self.value_label is not None:
self.value_label = "{}({})".format(estimator.__name__,
self.value_label)
def draw_confints(self, ax, at_group, confint, colors, **kws):
kws.setdefault("lw", mpl.rcParams["lines.linewidth"] * 1.8)
for at, (ci_low, ci_high), color in zip(at_group,
confint,
colors):
if self.orient == "v":
ax.plot([at, at], [ci_low, ci_high], color=color, **kws)
else:
ax.plot([ci_low, ci_high], [at, at], color=color, **kws)
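# Editorial sketch (not part of the original module): ``estimate_statistic``
# above delegates to seaborn's ``bootstrap`` and ``utils.ci`` helpers. The
# helper below is an independent, numpy-only illustration of the same idea --
# a percentile bootstrap confidence interval for an estimator -- and is not
# the exact implementation behind those helpers (in particular it ignores the
# ``units`` argument used for multilevel resampling).
def _bootstrap_ci_sketch(data, estimator=np.mean, n_boot=1000, ci=95, seed=0):
    """Return a (low, high) percentile bootstrap CI for ``estimator(data)``."""
    rng = np.random.RandomState(seed)
    data = np.asarray(data)
    boots = np.array([estimator(rng.choice(data, size=len(data), replace=True))
                      for _ in range(n_boot)])
    half_alpha = (100 - ci) / 2.
    return tuple(np.percentile(boots, [half_alpha, 100 - half_alpha]))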
class _BarPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with bars."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation, errcolor):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, saturation)
self.estimate_statistic(estimator, ci, n_boot)
self.errcolor = errcolor
def draw_bars(self, ax, kws):
"""Draw the bars onto `ax`."""
# Get the right matplotlib function depending on the orientation
barfunc = ax.bar if self.orient == "v" else ax.barh
barpos = np.arange(len(self.statistic))
if self.plot_hues is None:
# Draw the bars
barfunc(barpos, self.statistic, self.width,
color=self.colors, align="center", **kws)
# Draw the confidence intervals
errcolors = [self.errcolor] * len(barpos)
self.draw_confints(ax, barpos, self.confint, errcolors)
else:
for j, hue_level in enumerate(self.hue_names):
# Draw the bars
offpos = barpos + self.hue_offsets[j]
barfunc(offpos, self.statistic[:, j], self.nested_width,
color=self.colors[j], align="center",
label=hue_level, **kws)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.errcolor] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors)
def plot(self, ax, bar_kws):
"""Make the plot."""
self.draw_bars(ax, bar_kws)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
class _PointPlotter(_CategoricalStatPlotter):
"""Show point estimates and confidence intervals with (joined) points."""
def __init__(self, x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette):
"""Initialize the plotter."""
self.establish_variables(x, y, hue, data, orient,
order, hue_order, units)
self.establish_colors(color, palette, 1)
self.estimate_statistic(estimator, ci, n_boot)
# Override the default palette for single-color plots
if hue is None and color is None and palette is None:
self.colors = [color_palette()[0]] * len(self.colors)
# Don't join single-layer plots with different colors
if hue is None and palette is not None:
join = False
# Use a good default for `dodge=True`
if dodge is True and self.hue_names is not None:
dodge = .025 * len(self.hue_names)
# Make sure we have a marker for each hue level
if isinstance(markers, string_types):
markers = [markers] * len(self.colors)
self.markers = markers
# Make sure we have a line style for each hue level
if isinstance(linestyles, string_types):
linestyles = [linestyles] * len(self.colors)
self.linestyles = linestyles
# Set the other plot components
self.dodge = dodge
self.join = join
self.scale = scale
@property
def hue_offsets(self):
"""Offsets relative to the center position for each hue level."""
offset = np.linspace(0, self.dodge, len(self.hue_names))
offset -= offset.mean()
return offset
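        # Editorial note (not part of the original module): with ``dodge=0.3``
        # and three hue levels, ``np.linspace(0, 0.3, 3)`` gives
        # [0, 0.15, 0.3]; subtracting the mean recenters this to
        # [-0.15, 0, 0.15], so the hue levels straddle the category position.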
def draw_points(self, ax):
"""Draw the main data components of the plot."""
# Get the center positions on the categorical axis
pointpos = np.arange(len(self.statistic))
# Get the size of the plot elements
lw = mpl.rcParams["lines.linewidth"] * 1.8 * self.scale
mew = lw * .75
markersize = np.pi * np.square(lw) * 2
if self.plot_hues is None:
# Draw lines joining each estimate point
if self.join:
color = self.colors[0]
ls = self.linestyles[0]
if self.orient == "h":
ax.plot(self.statistic, pointpos,
color=color, ls=ls, lw=lw)
else:
ax.plot(pointpos, self.statistic,
color=color, ls=ls, lw=lw)
# Draw the confidence intervals
self.draw_confints(ax, pointpos, self.confint, self.colors, lw=lw)
# Draw the estimate points
marker = self.markers[0]
if self.orient == "h":
ax.scatter(self.statistic, pointpos,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
ax.scatter(pointpos, self.statistic,
linewidth=mew, marker=marker, s=markersize,
c=self.colors, edgecolor=self.colors)
else:
offsets = self.hue_offsets
for j, hue_level in enumerate(self.hue_names):
# Determine the values to plot for this level
statistic = self.statistic[:, j]
# Determine the position on the categorical and z axes
offpos = pointpos + offsets[j]
z = j + 1
# Draw lines joining each estimate point
if self.join:
color = self.colors[j]
ls = self.linestyles[j]
if self.orient == "h":
ax.plot(statistic, offpos, color=color,
zorder=z, ls=ls, lw=lw)
else:
ax.plot(offpos, statistic, color=color,
zorder=z, ls=ls, lw=lw)
# Draw the confidence intervals
if self.confint.size:
confint = self.confint[:, j]
errcolors = [self.colors[j]] * len(offpos)
self.draw_confints(ax, offpos, confint, errcolors,
zorder=z, lw=lw)
# Draw the estimate points
marker = self.markers[j]
if self.orient == "h":
ax.scatter(statistic, offpos, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
else:
ax.scatter(offpos, statistic, label=hue_level,
c=[self.colors[j]] * len(offpos),
linewidth=mew, marker=marker, s=markersize,
edgecolor=self.colors[j], zorder=z)
def plot(self, ax):
"""Make the plot."""
self.draw_points(ax)
self.annotate_axes(ax)
if self.orient == "h":
ax.invert_yaxis()
_categorical_docs = dict(
# Shared narrative docs
main_api_narrative=dedent("""\
Input data can be passed in a variety of formats, including:
- Vectors of data represented as lists, numpy arrays, or pandas Series
objects passed directly to the ``x``, ``y``, and/or ``hue`` parameters.
- A "long-form" DataFrame, in which case the ``x``, ``y``, and ``hue``
variables will determine how the data are plotted.
- A "wide-form" DataFrame, such that each numeric column will be plotted.
- Anything accepted by ``plt.boxplot`` (e.g. a 2d array or list of vectors)
In most cases, it is possible to use numpy or Python objects, but pandas
objects are preferable because the associated names will be used to
annotate the axes. Additionally, you can use Categorical types for the
grouping variables to control the order of plot elements.\
"""),
# Shared function parameters
input_params=dedent("""\
x, y, hue : names of variables in ``data`` or vector data, optional
Inputs for plotting long-form data. See examples for interpretation.\
"""),
string_input_params=dedent("""\
x, y, hue : names of variables in ``data``
Inputs for plotting long-form data. See examples for interpretation.\
"""),
categorical_data=dedent("""\
data : DataFrame, array, or list of arrays, optional
Dataset for plotting. If ``x`` and ``y`` are absent, this is
interpreted as wide-form. Otherwise it is expected to be long-form.\
"""),
long_form_data=dedent("""\
data : DataFrame
Long-form (tidy) dataset for plotting. Each column should correspond
to a variable, and each row should correspond to an observation.\
"""),
order_vars=dedent("""\
order, hue_order : lists of strings, optional
Order to plot the categorical levels in, otherwise the levels are
inferred from the data objects.\
"""),
stat_api_params=dedent("""\
estimator : callable that maps vector -> scalar, optional
Statistical function to estimate within each categorical bin.
ci : float or None, optional
Size of confidence intervals to draw around estimated values. If
``None``, no bootstrapping will be performed, and error bars will
not be drawn.
n_boot : int, optional
Number of bootstrap iterations to use when computing confidence
intervals.
units : name of variable in ``data`` or vector data, optional
Identifier of sampling units, which will be used to perform a
multilevel bootstrap and account for repeated measures design.\
"""),
orient=dedent("""\
orient : "v" | "h", optional
Orientation of the plot (vertical or horizontal). This is usually
inferred from the dtype of the input variables, but can be used to
specify when the "categorical" variable is a numeric or when plotting
wide-form data.\
"""),
color=dedent("""\
color : matplotlib color, optional
Color for all of the elements, or seed for :func:`light_palette` when
using hue nesting.\
"""),
palette=dedent("""\
palette : palette name, list, or dict, optional
Color palette that maps either the grouping variable or the hue
variable. If the palette is a dictionary, keys should be names of
levels and values should be matplotlib colors.\
"""),
saturation=dedent("""\
saturation : float, optional
Proportion of the original saturation to draw colors at. Large patches
often look better with slightly desaturated colors, but set this to
``1`` if you want the plot colors to perfectly match the input color
spec.\
"""),
width=dedent("""\
width : float, optional
Width of a full element when not using hue nesting, or width of all the
elements for one level of the major grouping variable.\
"""),
linewidth=dedent("""\
linewidth : float, optional
Width of the gray lines that frame the plot elements.\
"""),
ax_in=dedent("""\
ax : matplotlib Axes, optional
Axes object to draw the plot onto, otherwise uses the current Axes.\
"""),
ax_out=dedent("""\
ax : matplotlib Axes
        Returns the Axes object with the plot drawn onto it.\
"""),
# Shared see also
boxplot=dedent("""\
boxplot : A traditional box-and-whisker plot with a similar API.\
"""),
violinplot=dedent("""\
violinplot : A combination of boxplot and kernel density estimation.\
"""),
stripplot=dedent("""\
stripplot : A scatterplot where one variable is categorical. Can be used
        in conjunction with other plots to show each observation.\
"""),
barplot=dedent("""\
barplot : Show point estimates and confidence intervals using bars.\
"""),
countplot=dedent("""\
countplot : Show the counts of observations in each categorical bin.\
"""),
pointplot=dedent("""\
pointplot : Show point estimates and confidence intervals using scatterplot
glyphs.\
"""),
factorplot=dedent("""\
    factorplot : Combine categorical plots and a :class:`FacetGrid`.\
"""),
)
_categorical_docs.update(_facet_docs)
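# Editorial sketch (not part of the original module): the shared
# ``main_api_narrative`` above describes the accepted input formats. A minimal
# illustration of long-form versus wide-form input, assuming pandas and the
# ``boxplot`` function defined just below:
#
#     import pandas as pd
#     long_df = pd.DataFrame({"day": ["Thur", "Thur", "Fri", "Fri"],
#                             "total_bill": [10.0, 12.5, 20.0, 18.5]})
#     wide_df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 4.0, 6.0]})
#     boxplot(x="day", y="total_bill", data=long_df)  # long-form: one box per day
#     boxplot(data=wide_df)                           # wide-form: one box per column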
def boxplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
width=.8, fliersize=5, linewidth=None, whis=1.5, notch=False,
ax=None, **kwargs):
# Try to handle broken backwards-compatability
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
if "names" in kwargs:
kwargs.pop("names")
warn = True
if "join_rm" in kwargs:
kwargs.pop("join_rm")
warn = True
msg = ("The boxplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _BoxPlotter(x, y, hue, data, order, hue_order,
orient, color, palette, saturation,
width, fliersize, linewidth)
if ax is None:
ax = plt.gca()
kwargs.update(dict(whis=whis, notch=notch))
plotter.plot(ax, kwargs)
return ax
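# Editorial note (not part of the original module): the block at the top of
# ``boxplot`` adapts calls written against the pre-0.6 API. Tracing the code
# above, an old-style call such as
#
#     boxplot(vals="total_bill", groupby="day", data=tips)
#
# is rewritten to the equivalent of
#
#     boxplot(x="day", y="total_bill", data=tips)
#
# and a UserWarning is emitted asking callers to update their code. The same
# shim appears in ``violinplot`` below.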
boxplot.__doc__ = dedent("""\
Draw a box plot to show distributions with respect to categories.
A box plot (or box-and-whisker plot) shows the distribution of quantitative
data in a way that facilitates comparisons between variables or across
levels of a categorical variable. The box shows the quartiles of the
dataset while the whiskers extend to show the rest of the distribution,
except for points that are determined to be "outliers" using a method
that is a function of the inter-quartile range.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{width}
fliersize : float, optional
Size of the markers used to indicate outlier observations.
{linewidth}
whis : float, optional
Proportion of the IQR past the low and high quartiles to extend the
plot whiskers. Points outside this range will be identified as
outliers.
notch : boolean, optional
Whether to "notch" the box to indicate a confidence interval for the
median. There are several other parameters that can control how the
notches are drawn; see the ``plt.boxplot`` help for more information
on them.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.boxplot`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{violinplot}
{stripplot}
Examples
--------
Draw a single horizontal boxplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.boxplot(x=tips["total_bill"])
Draw a vertical boxplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
Draw a boxplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="Set3")
Draw a boxplot with nested grouping when some bins are empty:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", hue="time",
... data=tips, linewidth=2.5)
Control box order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips.sort("size"))
Control box order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw a boxplot for each numeric variable in a DataFrame:
.. plot::
:context: close-figs
>>> iris = sns.load_dataset("iris")
>>> ax = sns.boxplot(data=iris, orient="h", palette="Set2")
Use :func:`stripplot` to show the datapoints on top of the boxes:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="day", y="total_bill", data=tips)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... size=4, jitter=True, edgecolor="gray")
Draw a box plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.boxplot, "sex", "total_bill", "smoker")
... .despine(left=True)
... .add_legend(title="smoker")) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def violinplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
bw="scott", cut=2, scale="area", scale_hue=True, gridsize=100,
width=.8, inner="box", split=False, orient=None, linewidth=None,
color=None, palette=None, saturation=.75, ax=None, **kwargs):
# Try to handle broken backwards-compatability
# This should help with the lack of a smooth deprecation,
# but won't catch everything
warn = False
if isinstance(x, pd.DataFrame):
data = x
x = None
warn = True
if "vals" in kwargs:
x = kwargs.pop("vals")
warn = True
if "groupby" in kwargs:
y = x
x = kwargs.pop("groupby")
warn = True
if "vert" in kwargs:
vert = kwargs.pop("vert", True)
if not vert:
x, y = y, x
orient = "v" if vert else "h"
warn = True
msg = ("The violinplot API has been changed. Attempting to adjust your "
"arguments for the new API (which might not work). Please update "
"your code. See the version 0.6 release notes for more info.")
if warn:
warnings.warn(msg, UserWarning)
plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,
bw, cut, scale, scale_hue, gridsize,
width, inner, split, orient, linewidth,
color, palette, saturation)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
violinplot.__doc__ = dedent("""\
Draw a combination of boxplot and kernel density estimate.
A violin plot plays a similar role as a box and whisker plot. It shows the
distribution of quantitative data across several levels of one (or more)
categorical variables such that those distributions can be compared. Unlike
a box plot, in which all of the plot components correspond to actual
datapoints, the violin plot features a kernel density estimation of the
underlying distribution.
This can be an effective and attractive way to show multiple distributions
of data at once, but keep in mind that the estimation procedure is
influenced by the sample size, and violins for relatively small samples
might look misleadingly smooth.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
bw : {{'scott', 'silverman', float}}, optional
Either the name of a reference rule or the scale factor to use when
computing the kernel bandwidth. The actual kernel size will be
determined by multiplying the scale factor by the standard deviation of
the data within each bin.
cut : float, optional
Distance, in units of bandwidth size, to extend the density past the
extreme datapoints. Set to 0 to limit the violin range within the range
of the observed data (i.e., to have the same effect as ``trim=True`` in
        ``ggplot``).
scale : {{"area", "count", "width"}}, optional
The method used to scale the width of each violin. If ``area``, each
violin will have the same area. If ``count``, the width of the violins
will be scaled by the number of observations in that bin. If ``width``,
each violin will have the same width.
scale_hue : bool, optional
When nesting violins using a ``hue`` variable, this parameter
determines whether the scaling is computed within each level of the
major grouping variable (``scale_hue=True``) or across all the violins
on the plot (``scale_hue=False``).
gridsize : int, optional
Number of points in the discrete grid used to compute the kernel
density estimate.
{width}
inner : {{"box", "quartile", "point", "stick", None}}, optional
Representation of the datapoints in the violin interior. If ``box``,
        draw a miniature boxplot. If ``quartile``, draw the quartiles of the
distribution. If ``point`` or ``stick``, show each underlying
datapoint. Using ``None`` will draw unadorned violins.
split : bool, optional
When using hue nesting with a variable that takes two levels, setting
``split`` to True will draw half of a violin for each level. This can
make it easier to directly compare the distributions.
{orient}
{linewidth}
{color}
{palette}
{saturation}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{stripplot}
Examples
--------
Draw a single horizontal violinplot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.violinplot(x=tips["total_bill"])
Draw a vertical violinplot grouped by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips)
Draw a violinplot with nested grouping by two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted")
    Draw split violins to compare across the hue variable:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="smoker",
... data=tips, palette="muted", split=True)
Control violin order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips.sort("size"))
Control violin order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Scale the violin width by the number of observations in each bin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count")
Draw the quartiles as horizontal lines instead of a mini-box:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="quartile")
Show each observation with a stick inside the violin:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick")
Scale the density relative to the counts across all bins:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick", scale_hue=False)
Use a narrow bandwidth to reduce the amount of smoothing:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", hue="sex",
... data=tips, palette="Set2", split=True,
... scale="count", inner="stick",
... scale_hue=False, bw=.2)
Draw horizontal violins:
.. plot::
:context: close-figs
>>> planets = sns.load_dataset("planets")
>>> ax = sns.violinplot(x="orbital_period", y="method",
... data=planets[planets.orbital_period < 1000],
... scale="width", palette="Set3")
Draw a violin plot on to a :class:`FacetGrid` to group within an additional
categorical variable:
.. plot::
:context: close-figs
>>> g = sns.FacetGrid(tips, col="time", size=4, aspect=.7)
>>> (g.map(sns.violinplot, "sex", "total_bill", "smoker", split=True)
... .despine(left=True)
... .add_legend(title="smoker")) # doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
def stripplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
jitter=False, split=True, orient=None, color=None, palette=None,
size=7, edgecolor="w", linewidth=1, ax=None, **kwargs):
plotter = _StripPlotter(x, y, hue, data, order, hue_order,
jitter, split, orient, color, palette)
if ax is None:
ax = plt.gca()
kwargs.update(dict(s=size ** 2, edgecolor=edgecolor, linewidth=linewidth))
if edgecolor == "gray":
kwargs["edgecolor"] = plotter.gray
plotter.plot(ax, kwargs)
return ax
stripplot.__doc__ = dedent("""\
Draw a scatterplot where one variable is categorical.
A strip plot can be drawn on its own, but it is also a good complement
to a box or violin plot in cases where you want to show all observations
along with some representation of the underlying distribution.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
jitter : float, ``True``/``1`` is special-cased, optional
Amount of jitter (only along the categorical axis) to apply. This
can be useful when you have many points and they overlap, so that
it is easier to see the distribution. You can specify the amount
of jitter (half the width of the uniform random variable support),
or just use ``True`` for a good default.
split : bool, optional
When using ``hue`` nesting, setting this to ``True`` will separate
the strips for different hue levels along the categorical axis.
Otherwise, the points for each level will be plotted on top of
each other.
{orient}
{color}
{palette}
size : float, optional
Diameter of the markers, in points. (Although ``plt.scatter`` is used
to draw the points, the ``size`` argument here takes a "normal"
        markersize and not size^2 like ``plt.scatter``.)
edgecolor : matplotlib color, "gray" is special-cased, optional
Color of the lines around each point. If you pass ``"gray"``, the
brightness is determined by the color palette used for the body
of the points.
{linewidth}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{boxplot}
{violinplot}
Examples
--------
Draw a single horizontal strip plot:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.stripplot(x=tips["total_bill"])
Group the strips by a categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips)
Add jitter to bring out the distribution of values:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
Use a smaller amount of jitter:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips, jitter=0.05)
Draw horizontal strips:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="total_bill", y="day", data=tips,
... jitter=True)
Nest the strips within a second categorical variable:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="sex", y="total_bill", hue="day",
... data=tips, jitter=True)
Draw each level of the ``hue`` variable at the same location on the
major categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="day", y="total_bill", hue="smoker",
... data=tips, jitter=True,
... palette="Set2", split=False)
Control strip order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips.sort("size"))
Control strip order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.stripplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Draw strips with large points and different aesthetics:
.. plot::
:context: close-figs
>>> ax = sns.stripplot("day", "total_bill", "smoker", data=tips,
... palette="Set2", size=20, marker="D",
... edgecolor="gray", alpha=.25)
Draw strips of observations on top of a box plot:
.. plot::
:context: close-figs
>>> ax = sns.boxplot(x="tip", y="day", data=tips, whis=np.inf)
>>> ax = sns.stripplot(x="tip", y="day", data=tips, jitter=True)
Draw strips of observations on top of a violin plot:
.. plot::
:context: close-figs
>>> ax = sns.violinplot(x="day", y="total_bill", data=tips, inner=None)
>>> ax = sns.stripplot(x="day", y="total_bill", data=tips,
... jitter=True, color="white", edgecolor="gray")
""").format(**_categorical_docs)
def barplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
orient=None, color=None, palette=None, saturation=.75,
errcolor=".26", ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
barplot.__doc__ = dedent("""\
Show point estimates and confidence intervals as rectangular bars.
A bar plot represents an estimate of central tendency for a numeric
variable with the height of each rectangle and provides some indication of
the uncertainty around that estimate using error bars. Bar plots include 0
in the quantitative axis range, and they are a good choice when 0 is a
meaningful value for the quantitative variable, and you want to make
comparisons against it.
For datasets where 0 is not a meaningful value, a point plot will allow you
to focus on differences between levels of one or more categorical
variables.
It is also important to keep in mind that a bar plot shows only the mean
(or other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
{orient}
{color}
{palette}
{saturation}
errcolor : matplotlib color
Color for the lines that represent the confidence interval.
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed through to ``plt.bar`` at draw
time.
Returns
-------
{ax_out}
See Also
--------
{countplot}
{pointplot}
{factorplot}
Examples
--------
Draw a set of vertical bar plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("whitegrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.barplot(x="day", y="total_bill", data=tips)
    Draw a set of vertical bars with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
Draw a set of horizontal bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="tip", y="day", data=tips)
Control bar order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips.sort("size"))
Control bar order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.barplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot(x="day", y="tip", data=tips, ci=68)
Use a different color palette for the bars:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... palette="Blues_d")
Plot all bars in a single color:
.. plot::
:context: close-figs
>>> ax = sns.barplot("size", y="total_bill", data=tips.sort("size"),
... color="salmon", saturation=.5)
Use ``plt.bar`` keyword arguments to further change the aesthetic:
.. plot::
:context: close-figs
>>> ax = sns.barplot("day", "total_bill", data=tips,
... linewidth=2.5, facecolor=(1, 1, 1, 0),
... errcolor=".2", edgecolor=".2")
""").format(**_categorical_docs)
def pointplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
estimator=np.mean, ci=95, n_boot=1000, units=None,
markers="o", linestyles="-", dodge=False, join=True, scale=1,
orient=None, color=None, palette=None, ax=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
plotter = _PointPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
markers, linestyles, dodge, join, scale,
orient, color, palette)
if ax is None:
ax = plt.gca()
plotter.plot(ax)
return ax
pointplot.__doc__ = dedent("""\
Show point estimates and confidence intervals using scatter plot glyphs.
A point plot represents an estimate of central tendency for a numeric
variable by the position of scatter plot points and provides some
indication of the uncertainty around that estimate using error bars.
Point plots can be more useful than bar plots for focusing comparisons
between different levels of one or more categorical variables. They are
particularly adept at showing interactions: how the relationship between
levels of one categorical variable changes across levels of a second
categorical variable. The lines that join each point from the same ``hue``
level allow interactions to be judged by differences in slope, which is
easier for the eyes than comparing the heights of several groups of points
or bars.
It is important to keep in mind that a point plot shows only the mean (or
other estimator) value, but in many cases it may be more informative to
show the distribution of values at each level of the categorical variables.
In that case, other approaches such as a box or violin plot may be more
appropriate.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{stat_api_params}
markers : string or list of strings, optional
Markers to use for each of the ``hue`` levels.
linestyles : string or list of strings, optional
Line styles to use for each of the ``hue`` levels.
dodge : bool or float, optional
Amount to separate the points for each level of the ``hue`` variable
along the categorical axis.
join : bool, optional
If ``True``, lines will be drawn between point estimates at the same
``hue`` level.
scale : float, optional
Scale factor for the plot elements.
{orient}
{color}
{palette}
{ax_in}
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Draw a set of vertical point plots grouped by a categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set_style("darkgrid")
>>> tips = sns.load_dataset("tips")
>>> ax = sns.pointplot(x="time", y="total_bill", data=tips)
    Draw a set of vertical points with nested grouping by two variables:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips)
Separate the points for different hue levels along the categorical axis:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, dodge=True)
Use a different marker and line style for the hue levels:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips,
... markers=["o", "x"],
... linestyles=["-", "--"])
Draw a set of horizontal points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips)
Don't draw a line connecting each point:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="tip", y="day", data=tips, join=False)
Use a different color for a single-layer plot:
.. plot::
:context: close-figs
>>> ax = sns.pointplot("time", y="total_bill", data=tips,
... color="#bb3f3f")
Use a different color palette for the points:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="time", y="total_bill", hue="smoker",
... data=tips, palette="Set2")
Control point order by sorting the input data:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips.sort("size"))
Control point order by passing an explicit order:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="size", y="tip", data=tips,
... order=np.arange(1, 7), palette="Blues_d")
Use median as the estimate of central tendency:
.. plot::
:context: close-figs
>>> from numpy import median
>>> ax = sns.pointplot(x="day", y="tip", data=tips, estimator=median)
Show the standard error of the mean with the error bars:
.. plot::
:context: close-figs
>>> ax = sns.pointplot(x="day", y="tip", data=tips, ci=68)
""").format(**_categorical_docs)
def countplot(x=None, y=None, hue=None, data=None, order=None, hue_order=None,
orient=None, color=None, palette=None, saturation=.75,
ax=None, **kwargs):
estimator = len
ci = None
n_boot = 0
units = None
errcolor = None
if x is None and y is not None:
orient = "h"
x = y
elif y is None and x is not None:
orient = "v"
y = x
elif x is not None and y is not None:
raise TypeError("Cannot pass values for both `x` and `y`")
else:
raise TypeError("Must pass valus for either `x` or `y`")
plotter = _BarPlotter(x, y, hue, data, order, hue_order,
estimator, ci, n_boot, units,
orient, color, palette, saturation,
errcolor)
plotter.value_label = "count"
if ax is None:
ax = plt.gca()
plotter.plot(ax, kwargs)
return ax
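# Editorial note (not part of the original module): ``countplot`` is a thin
# wrapper around ``_BarPlotter`` with ``estimator=len`` and ``ci=None``, so
# bar heights are simply the group sizes and no bootstrapped error bars are
# drawn. Because the function assigns ``y = x`` (or ``x = y`` for horizontal
# plots) internally, a call such as
#
#     countplot(x="class", data=titanic)
#
# behaves roughly like a bar plot of ``len`` over the same variable used for
# both axes.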
countplot.__doc__ = dedent("""\
Show the counts of observations in each categorical bin using bars.
A count plot can be thought of as a histogram across a categorical, instead
of quantitative, variable. The basic API and options are identical to those
for :func:`barplot`, so you can compare counts across nested variables.
{main_api_narrative}
Parameters
----------
{input_params}
{categorical_data}
{order_vars}
{orient}
{color}
{palette}
{saturation}
{ax_in}
kwargs : key, value mappings
Other keyword arguments are passed to ``plt.bar``.
Returns
-------
{ax_out}
See Also
--------
{barplot}
{factorplot}
Examples
--------
Show value counts for a single categorical variable:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="darkgrid")
>>> titanic = sns.load_dataset("titanic")
>>> ax = sns.countplot(x="class", data=titanic)
Show value counts for two categorical variables:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="class", hue="who", data=titanic)
Plot the bars horizontally:
.. plot::
:context: close-figs
>>> ax = sns.countplot(y="class", hue="who", data=titanic)
Use a different color palette:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic, palette="Set3")
Use ``plt.bar`` keyword arguments for a different look:
.. plot::
:context: close-figs
>>> ax = sns.countplot(x="who", data=titanic,
... facecolor=(0, 0, 0, 0),
... linewidth=5,
... edgecolor=sns.color_palette("dark", 3))
""").format(**_categorical_docs)
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, order=None, hue_order=None, row_order=None,
col_order=None, kind="point", size=4, aspect=1,
orient=None, color=None, palette=None,
legend=True, legend_out=True, sharex=True, sharey=True,
margin_titles=False, facet_kws=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
# Determine the plotting function
try:
plot_func = globals()[kind + "plot"]
except KeyError:
err = "Plot kind '{}' is not recognized".format(kind)
raise ValueError(err)
# Alias the input variables to determine categorical order and palette
# correctly in the case of a count plot
if kind == "count":
if x is None and y is not None:
x_, y_, orient = y, y, "h"
elif y is None and x is not None:
x_, y_, orient = x, x, "v"
else:
raise ValueError("Either `x` or `y` must be None for count plots")
else:
x_, y_ = x, y
# Determine the order for the whole dataset, which will be used in all
# facets to ensure representation of all data in the final plot
p = _CategoricalPlotter()
p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
order = p.group_names
hue_order = p.hue_names
# Determine the palette to use
# (FacetGrid will pass a value for ``color`` to the plotting function
# so we need to define ``palette`` to get default behavior for the
    # categorical functions)
p.establish_colors(color, palette, 1)
if kind != "point" or hue is not None:
palette = p.colors
# Determine keyword arguments for the facets
facet_kws = {} if facet_kws is None else facet_kws
facet_kws.update(
data=data, row=row, col=col,
row_order=row_order, col_order=col_order,
col_wrap=col_wrap, size=size, aspect=aspect,
sharex=sharex, sharey=sharey,
legend_out=legend_out, margin_titles=margin_titles,
dropna=False,
)
# Determine keyword arguments for the plotting function
plot_kws = dict(
order=order, hue_order=hue_order,
orient=orient, color=color, palette=palette,
)
plot_kws.update(kwargs)
if kind in ["bar", "point"]:
plot_kws.update(
estimator=estimator, ci=ci, n_boot=n_boot, units=units,
)
# Initialize the facets
g = FacetGrid(**facet_kws)
# Draw the plot onto the facets
g.map_dataframe(plot_func, x, y, hue, **plot_kws)
# Special case axis labels for a count type plot
if kind == "count":
if x is None:
g.set_axis_labels(x_var="count")
if y is None:
g.set_axis_labels(y_var="count")
if legend and (hue is not None) and (hue not in [x, row, col]):
hue_order = list(map(str, hue_order))
g.add_legend(title=hue, label_order=hue_order)
return g
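# Editorial note (not part of the original module): ``factorplot`` dispatches
# on ``kind`` by looking up ``globals()[kind + "plot"]``, so ``kind="violin"``
# resolves to the module-level ``violinplot`` defined above; the chosen
# function is then drawn onto each facet with ``FacetGrid.map_dataframe``. A
# minimal sketch of the dispatch step, assuming ``kind`` has already been
# validated:
#
#     kind = "box"
#     plot_func = globals()[kind + "plot"]  # -> boxplot
#     plot_func(x="day", y="total_bill", data=tips, ax=some_axes)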
factorplot.__doc__ = dedent("""\
Draw a categorical plot onto a FacetGrid.
The default plot that is shown is a point plot, but other seaborn
categorical plots can be chosen with the ``kind`` parameter, including
box plots, violin plots, bar plots, or strip plots.
It is important to choose how variables get mapped to the plot structure
such that the most important comparisons are easiest to make. As a general
rule, it is easier to compare positions that are closer together, so the
``hue`` variable should be used for the most important comparisons. For
secondary comparisons, try to share the quantitative axis (so, use ``col``
for vertical plots and ``row`` for horizontal plots). Note that, although
it is possible to make rather complex plots using this function, in many
    cases you may be better served by creating several smaller and more focused
plots than by trying to stuff many comparisons into one figure.
After plotting, the :class:`FacetGrid` with the plot is returned and can
be used directly to tweak supporting plot details or add other layers.
Note that, unlike when using the underlying plotting functions directly,
data must be passed in a long-form DataFrame with variables specified by
passing strings to ``x``, ``y``, ``hue``, and other parameters.
As in the case with the underlying plot functions, if variables have a
``categorical`` data type, the correct orientation of the plot elements,
the levels of the categorical variables, and their order will be inferred
from the objects. Otherwise you may have to use the function parameters
(``orient``, ``order``, ``hue_order``, etc.) to set up the plot correctly.
Parameters
----------
{string_input_params}
{long_form_data}
row, col : names of variables in ``data``, optional
Categorical variables that will determine the faceting of the grid.
{col_wrap}
{stat_api_params}
{order_vars}
row_order, col_order : lists of strings, optional
Order to organize the rows and/or columns of the grid in, otherwise the
orders are inferred from the data objects.
kind : {{``point``, ``bar``, ``count``, ``box``, ``violin``, ``strip``}}
The kind of plot to draw.
{size}
{aspect}
{orient}
{color}
{palette}
legend : bool, optional
If ``True`` and there is a ``hue`` variable, draw a legend on the plot.
{legend_out}
{share_xy}
{margin_titles}
facet_kws : dict, optional
Dictionary of other keyword arguments to pass to :class:`FacetGrid`.
kwargs : key, value pairings
Other keyword arguments are passed through to the underlying plotting
function.
Returns
-------
g : :class:`FacetGrid`
Returns the :class:`FacetGrid` object with the plot on it for further
tweaking.
Examples
--------
Draw a single facet to use the :class:`FacetGrid` legend placement:
.. plot::
:context: close-figs
>>> import seaborn as sns
>>> sns.set(style="ticks")
>>> exercise = sns.load_dataset("exercise")
>>> g = sns.factorplot(x="time", y="pulse", hue="kind", data=exercise)
Use a different plot kind to visualize the same data:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... data=exercise, kind="violin")
Facet along the columns to show a third categorical variable:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise)
Use a different size and aspect ratio for the facets:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="time", y="pulse", hue="kind",
... col="diet", data=exercise,
... size=5, aspect=.8)
Make many column facets and wrap them into the rows of the grid:
.. plot::
:context: close-figs
>>> titanic = sns.load_dataset("titanic")
>>> g = sns.factorplot("alive", col="deck", col_wrap=4,
... data=titanic[titanic.deck.notnull()],
... kind="count", size=2.5, aspect=.8)
Plot horizontally and pass other keyword arguments to the plot function:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="age", y="embark_town",
... hue="sex", row="class",
... data=titanic[titanic.embark_town.notnull()],
... orient="h", size=2, aspect=3.5, palette="Set3",
... kind="violin", split=True, cut=0, bw=.2)
Use methods on the returned :class:`FacetGrid` to tweak the presentation:
.. plot::
:context: close-figs
>>> g = sns.factorplot(x="who", y="survived", col="class",
... data=titanic, saturation=.5,
... kind="bar", ci=None, aspect=.6)
>>> (g.set_axis_labels("", "Survival Rate")
... .set_xticklabels(["Men", "Women", "Children"])
... .set_titles("{{col_name}} {{col_var}}")
... .set(ylim=(0, 1))
... .despine(left=True)) #doctest: +ELLIPSIS
<seaborn.axisgrid.FacetGrid object at 0x...>
""").format(**_categorical_docs)
| bsd-3-clause |
kazemakase/scikit-learn | sklearn/decomposition/base.py | 313 | 5647 | """Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils.extmath import fast_dot
from ..utils.validation import check_is_fitted
from ..externals import six
from abc import ABCMeta, abstractmethod
class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array, shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[::len(precision) + 1] += 1. / exp_var_diff
precision = np.dot(components_.T,
np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X, y=None):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import IncrementalPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> ipca = IncrementalPCA(n_components=2, batch_size=3)
>>> ipca.fit(X)
IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False)
>>> ipca.transform(X) # doctest: +SKIP
"""
check_is_fitted(self, ['mean_', 'components_'], all_or_any=all)
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
In other words, return an input X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original : array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
self.components_) + self.mean_
else:
return fast_dot(X, self.components_) + self.mean_
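# Minimal usage sketch for the helpers above (illustration only: _BasePCA is
# abstract, so the concrete PCA subclass is used, and the demo data is random;
# copy into user code rather than running this internal module directly).
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 5)
    pca = PCA(n_components=3).fit(X_demo)
    cov = pca.get_covariance()   # components_.T * S**2 * components_ + sigma2 * I
    prec = pca.get_precision()   # the same matrix inverted via the inversion lemma
    print("max |cov.dot(prec) - I|:", np.abs(cov.dot(prec) - np.eye(5)).max())
    X_back = pca.inverse_transform(pca.transform(X_demo))   # lossy round trip
    print("reconstruction RMSE:", float(np.sqrt(((X_demo - X_back) ** 2).mean())))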
| bsd-3-clause |
zhenwendai/RGP | autoreg/benchmark/run.py | 1 | 2773 | # Copyright (c) 2015, Zhenwen Dai
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from __future__ import print_function
from evaluation import RMSE
from methods import Autoreg_onelayer, Autoreg_onelayer_bfgs
from tasks import all_tasks
from outputs import PickleOutput, CSV_Summary
import numpy as np
import time
import pandas
outpath = '.'
prjname = 'autoreg'
default_config = {
'evaluations':[RMSE,'time'],
'methods':[Autoreg_onelayer_bfgs],
'tasks': all_tasks,
'repeats': 5,
'outputs': [PickleOutput(outpath, prjname), CSV_Summary(outpath, prjname)]
}
def run(config=default_config):
nTask = len(config['tasks'])
nMethod = len(config['methods'])
nRepeats = int(config['repeats'])
task_list = pandas.DataFrame({'task_id':range(nTask), 'task':[t.name for t in config['tasks']]})
method_list = pandas.DataFrame({'method_id':range(nMethod), 'method':[m.name for m in config['methods']]})
results = pandas.DataFrame(index=range(nTask*nMethod*nRepeats),columns=['task_id','method_id','repeat_id','model']+[e.name if not isinstance(e, str) else e for e in config['evaluations']])
for task_i in range(nTask):
dataset = config['tasks'][task_i]()
print('Benchmarking on '+dataset.name)
res = dataset.load_data()
win_in, win_out = dataset.win_in, dataset.win_out
if not res: print('Failed to load '+config['tasks'][task_i].name); continue
train = dataset.get_training_data()
test = dataset.get_test_data()
for method_i in range(nMethod):
method = config['methods'][method_i]
print('With the method '+method.name, end='')
for ri in range(nRepeats):
res_idx = ri+method_i*nRepeats+task_i*nRepeats*nMethod
t_st = time.time()
m = method(win_in, win_out)
m.fit(train)
pred = m.predict(test[0])
t_pd = time.time() - t_st
results.iloc[res_idx,0], results.iloc[res_idx,1], results.iloc[res_idx,2] = task_i, method_i, ri
results.iloc[res_idx,3] = m.model.param_array.copy()
for ei in range(len(config['evaluations'])):
if config['evaluations'][ei] !='time':
evalu = config['evaluations'][ei]()
results.iloc[res_idx,ei+4] = evalu.evaluate(test[1][win_out+win_in:], pred)
else:
results.iloc[res_idx,ei+4] = t_pd
print('.',end='')
print()
[out.output(config, task_list, method_list, results) for out in config['outputs']]
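# Entry-point sketch (an assumption: the benchmark is meant to be run as a script
# with the default configuration defined above).
if __name__ == '__main__':
    run(default_config)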
| bsd-3-clause |
manashmndl/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 57 | 13752 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
samzhang111/scikit-learn | examples/gaussian_process/plot_gpr_co2.py | 9 | 5718 | """
========================================================
Gaussian process regression (GPR) on Mauna Loa CO2 data.
========================================================
This example is based on Section 5.4.3 of "Gaussian Processes for Machine
Learning" [RW2006]. It illustrates an example of complex kernel engineering and
hyperparameter optimization using gradient ascent on the
log-marginal-likelihood. The data consists of the monthly average atmospheric
CO2 concentrations (in parts per million by volume (ppmv)) collected at the
Mauna Loa Observatory in Hawaii, between 1958 and 1997. The objective is to
model the CO2 concentration as a function of the time t.
The kernel is composed of several terms that are responsible for explaining
different properties of the signal:
- a long term, smooth rising trend is to be explained by an RBF kernel. The
RBF kernel with a large length-scale enforces this component to be smooth;
it is not enforced that the trend is rising which leaves this choice to the
GP. The specific length-scale and the amplitude are free hyperparameters.
- a seasonal component, which is to be explained by the periodic
ExpSineSquared kernel with a fixed periodicity of 1 year. The length-scale
of this periodic component, controlling its smoothness, is a free parameter.
In order to allow decaying away from exact periodicity, the product with an
RBF kernel is taken. The length-scale of this RBF component controls the
decay time and is a further free parameter.
- smaller, medium term irregularities are to be explained by a
RationalQuadratic kernel component, whose length-scale and alpha parameter,
which determines the diffuseness of the length-scales, are to be determined.
According to [RW2006], these irregularities can better be explained by
a RationalQuadratic than an RBF kernel component, probably because it can
accommodate several length-scales.
- a "noise" term, consisting of an RBF kernel contribution, which shall
explain the correlated noise components such as local weather phenomena,
and a WhiteKernel contribution for the white noise. The relative amplitudes
and the RBF's length scale are further free parameters.
Maximizing the log-marginal-likelihood after subtracting the target's mean
yields the following kernel with an LML of -83.214:
34.4**2 * RBF(length_scale=41.8)
+ 3.27**2 * RBF(length_scale=180) * ExpSineSquared(length_scale=1.44,
periodicity=1)
+ 0.446**2 * RationalQuadratic(alpha=17.7, length_scale=0.957)
+ 0.197**2 * RBF(length_scale=0.138) + WhiteKernel(noise_level=0.0336)
Thus, most of the target signal (34.4ppm) is explained by a long-term rising
trend (length-scale 41.8 years). The periodic component has an amplitude of
3.27ppm, a decay time of 180 years and a length-scale of 1.44. The long decay
time indicates that the seasonal component is locally very close to periodic.
The correlated noise has an amplitude of 0.197ppm with a length
scale of 0.138 years and a white-noise contribution of 0.197ppm. Thus, the
overall noise level is very small, indicating that the data can be very well
explained by the model. The figure also shows that the model makes very
confident predictions until around 2015.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels \
import RBF, WhiteKernel, RationalQuadratic, ExpSineSquared
from sklearn.datasets import fetch_mldata
data = fetch_mldata('mauna-loa-atmospheric-co2').data
X = data[:, [1]]
y = data[:, 0]
# Kernel with parameters given in GPML book
k1 = 66.0**2 * RBF(length_scale=67.0) # long term smooth rising trend
k2 = 2.4**2 * RBF(length_scale=90.0) \
* ExpSineSquared(length_scale=1.3, periodicity=1.0) # seasonal component
# medium term irregularity
k3 = 0.66**2 \
* RationalQuadratic(length_scale=1.2, alpha=0.78)
k4 = 0.18**2 * RBF(length_scale=0.134) \
+ WhiteKernel(noise_level=0.19**2) # noise terms
kernel_gpml = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel_gpml, alpha=0,
optimizer=None, normalize_y=True)
gp.fit(X, y)
print("GPML kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
# Kernel with optimized parameters
k1 = 50.0**2 * RBF(length_scale=50.0) # long term smooth rising trend
k2 = 2.0**2 * RBF(length_scale=100.0) \
* ExpSineSquared(length_scale=1.0, periodicity=1.0,
periodicity_bounds="fixed") # seasonal component
# medium term irregularities
k3 = 0.5**2 * RationalQuadratic(length_scale=1.0, alpha=1.0)
k4 = 0.1**2 * RBF(length_scale=0.1) \
+ WhiteKernel(noise_level=0.1**2,
noise_level_bounds=(1e-3, np.inf)) # noise terms
kernel = k1 + k2 + k3 + k4
gp = GaussianProcessRegressor(kernel=kernel, alpha=0,
normalize_y=True)
gp.fit(X, y)
print("\nLearned kernel: %s" % gp.kernel_)
print("Log-marginal-likelihood: %.3f"
% gp.log_marginal_likelihood(gp.kernel_.theta))
X_ = np.linspace(X.min(), X.max() + 30, 1000)[:, np.newaxis]
y_pred, y_std = gp.predict(X_, return_std=True)
# Illustration
plt.scatter(X, y, c='k')
plt.plot(X_, y_pred)
plt.fill_between(X_[:, 0], y_pred - y_std, y_pred + y_std,
alpha=0.5, color='k')
plt.xlim(X_.min(), X_.max())
plt.xlabel("Year")
plt.ylabel(r"CO$_2$ in ppm")
plt.title(r"Atmospheric CO$_2$ concentration at Mauna Loa")
plt.tight_layout()
plt.show()
| bsd-3-clause |
pradyu1993/scikit-learn | sklearn/linear_model/tests/test_omp.py | 2 | 7018 | # Author: Vlad Niculae
# License: BSD style
import warnings
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.linear_model import orthogonal_mp, orthogonal_mp_gram, \
OrthogonalMatchingPursuit
from sklearn.utils.fixes import count_nonzero
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute_gram=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute_gram=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute_gram=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute_gram=True))
def test_unreachable_accuracy():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0, precompute_gram=True),
orthogonal_mp(X, y, precompute_gram=True,
n_nonzero_coefs=n_features))
assert_greater(len(w), 0) # warnings should be raised
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
# XXX: use signal generator
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0], Gram=G, Xy=Xy[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y, Gram=G, Xy=Xy)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_scaling_with_gram():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Use only 1 nonzero coef to be faster and to avoid warnings
omp1 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=False)
omp2 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=True, normalize=False)
omp3 = OrthogonalMatchingPursuit(n_nonzero_coefs=1,
fit_intercept=False, normalize=True)
omp1.fit(X, y, Gram=G)
omp1.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 0)
omp2.fit(X, y, Gram=G)
assert_true(len(w) == 1)
omp2.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 2)
omp3.fit(X, y, Gram=G)
assert_true(len(w) == 3)
omp3.fit(X, y, Gram=G, Xy=Xy)
assert_true(len(w) == 4)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
orthogonal_mp(newX, newy, 2)
assert_true(len(w) == 1)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
gamma_empty = orthogonal_mp(X, y_empty, 1)
gamma_empty_gram = orthogonal_mp_gram(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
| bsd-3-clause |
shenzebang/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
Akshay0724/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 36 | 6957 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.neighbors import BallTree
from sklearn.utils.testing import SkipTest, assert_raises_regex
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_pickle(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
yield self.check_pickle, metric, kwargs
for metric in self.bool_metrics:
yield self.check_pickle_bool, metric
def check_pickle_bool(self, metric):
dm = DistanceMetric.get_metric(metric)
D1 = dm.pairwise(self.X1_bool)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1_bool)
assert_array_almost_equal(D1, D2)
def check_pickle(self, metric, kwargs):
dm = DistanceMetric.get_metric(metric, **kwargs)
D1 = dm.pairwise(self.X1)
dm2 = pickle.loads(pickle.dumps(dm))
D2 = dm2.pairwise(self.X1)
assert_array_almost_equal(D1, D2)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
def test_bad_pyfunc_metric():
def wrong_distance(x, y):
return "1"
X = np.ones((5, 2))
assert_raises_regex(TypeError,
"Custom distance function must accept two vectors",
BallTree, X, metric=wrong_distance)
def test_input_data_size():
# Regression test for #6288
# Previously, a metric requiring a particular input dimension would fail
def custom_metric(x, y):
assert x.shape[0] == 3
return np.sum((x - y) ** 2)
rng = np.random.RandomState(0)
X = rng.rand(10, 3)
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
eucl = DistanceMetric.get_metric("euclidean")
assert_array_almost_equal(pyfunc.pairwise(X), eucl.pairwise(X))
| bsd-3-clause |
rjeli/scikit-image | doc/examples/features_detection/plot_holes_and_peaks.py | 9 | 2713 | """
===============================
Filling holes and finding peaks
===============================
We fill holes (i.e. isolated, dark spots) in an image using morphological
reconstruction by erosion. Erosion expands the minimal values of the seed image
until it encounters a mask image. Thus, the seed image and mask image represent
the maximum and minimum possible values of the reconstructed image.
We start with an image containing both peaks and holes:
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.exposure import rescale_intensity
image = data.moon()
# Rescale image intensity so that we can see dim features.
image = rescale_intensity(image, in_range=(50, 200))
######################################################################
# Now we need to create the seed image, where the minima represent the
# starting points for erosion. To fill holes, we initialize the seed image
# to the maximum value of the original image. Along the borders, however, we
# use the original values of the image. These border pixels will be the
# starting points for the erosion process. We then limit the erosion by
# setting the mask to the values of the original image.
import numpy as np
from skimage.morphology import reconstruction
seed = np.copy(image)
seed[1:-1, 1:-1] = image.max()
mask = image
filled = reconstruction(seed, mask, method='erosion')
######################################################################
# As shown above, eroding inward from the edges removes holes, since (by
# definition) holes are surrounded by pixels of brighter value. Finally, we
# can isolate the dark regions by subtracting the reconstructed image from
# the original image.
#
# Alternatively, we can find bright spots in an image using morphological
# reconstruction by dilation. Dilation is the inverse of erosion and expands
# the *maximal* values of the seed image until it encounters a mask image.
# Since this is an inverse operation, we initialize the seed image to the
# minimum image intensity instead of the maximum. The remainder of the
# process is the same.
seed = np.copy(image)
seed[1:-1, 1:-1] = image.min()
rec = reconstruction(seed, mask, method='dilation')
fig, ax = plt.subplots(2, 2, figsize=(5, 4), sharex=True, sharey=True,
subplot_kw={'adjustable': 'box-forced'})
ax = ax.ravel()
ax[0].imshow(image, cmap='gray')
ax[0].set_title('Original image')
ax[0].axis('off')
ax[1].imshow(filled, cmap='gray')
ax[1].set_title('after filling holes')
ax[1].axis('off')
ax[2].imshow(image-filled, cmap='gray')
ax[2].set_title('holes')
ax[2].axis('off')
ax[3].imshow(image-rec, cmap='gray')
ax[3].set_title('peaks')
ax[3].axis('off')
plt.show()
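# Follow-on sketch (assumptions: same imports as above, h chosen arbitrarily).
# Reconstructing ``image - h`` by dilation under ``image`` reproduces the image
# with the tops of its peaks shaved off by at most h; subtracting that result
# extracts the shaved-off peaks (the classic "h-dome" transform).
img_f = image / 255.0  # work in float to avoid uint8 wrap-around when subtracting h
h = 0.15
hdome = img_f - reconstruction(img_f - h, img_f, method='dilation')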
| bsd-3-clause |
lisitsyn/shogun | applications/easysvm/tutpaper/svm_params.py | 12 | 12908 |
#from matplotlib import rc
#rc('text', usetex=True)
fontsize = 16
contourFontsize = 12
showColorbar = False
xmin = -1
xmax = 1
ymin = -1.05
ymax = 1
import sys,os
import numpy
import shogun
from shogun import GaussianKernel, LinearKernel, PolyKernel
from shogun import RealFeatures, BinaryLabels
from shogun import LibSVM
from numpy import arange
import matplotlib
from matplotlib import pylab
pylab.rcParams['contour.negative_linestyle'] = 'solid'
def features_from_file(fileName) :
fileHandle = open(fileName)
fileHandle.readline()
features = []
labels = []
for line in fileHandle :
tokens = line.split(',')
labels.append(float(tokens[1]))
features.append([float(token) for token in tokens[2:]])
return RealFeatures(numpy.transpose(numpy.array(features))), features, BinaryLabels(numpy.array(labels,numpy.float))
def create_kernel(kname, features, kparam=None) :
if kname == 'gauss' :
kernel = GaussianKernel(features, features, kparam)
elif kname == 'linear':
kernel = LinearKernel(features, features)
elif kname == 'poly' :
kernel = PolyKernel(features, features, kparam, True, False)
return kernel
def svm_train(kernel, labels, C1, C2=None):
"""Trains a SVM with the given kernel"""
num_threads = 1
kernel.io.disable_progress()
svm = LibSVM(C1, kernel, labels)
if C2:
svm.set_C(C1, C2)
svm.parallel.set_num_threads(num_threads)
svm.io.disable_progress()
svm.train()
return svm
def svm_test(svm, kernel, features_train, features_test) :
"""predicts on the test examples"""
kernel.init(features_train, features_test)
output = svm.apply().get_labels()
return output
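def train_and_predict_example(data_file='data/small_gc_toy.data', sigma=1.0, C=10):
    """Illustrative glue for the helpers above (a sketch: the toy data path and the
    Gaussian kernel width are assumptions borrowed from create_figures below)."""
    feats, vecs, labs = features_from_file(data_file)
    kern = create_kernel('gauss', feats, sigma)
    clf = svm_train(kern, labs, C)
    return svm_test(clf, kern, feats, feats)  # decision values on the training set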
def decision_boundary_plot(svm, features, vectors, labels, kernel, fileName = None, **args) :
title = None
if 'title' in args :
title = args['title']
xlabel = None
if 'xlabel' in args :
xlabel = args['xlabel']
ylabel = None
if 'ylabel' in args :
ylabel = args['ylabel']
fontsize = 'medium'
if 'fontsize' in args :
fontsize = args['fontsize']
contourFontsize = 10
if 'contourFontsize' in args :
contourFontsize = args['contourFontsize']
showColorbar = True
if 'showColorbar' in args :
showColorbar = args['showColorbar']
show = True
if fileName is not None :
show = False
if 'show' in args :
show = args['show']
# setting up the grid
delta = 0.005
x = arange(xmin, xmax, delta)
y = arange(ymin, ymax, delta)
Z = numpy.zeros((len(x), len(y)), numpy.float_)
gridX = numpy.zeros((len(x) *len(y), 2), numpy.float_)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
gridX[n][0] = x[i]
gridX[n][1] = y[j]
n += 1
if kernel.get_name() == 'Linear' and 'customwandb' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
kernel.set_w(args['customwandb'][0])
svm.set_bias(args['customwandb'][1])
if kernel.get_name() == 'Linear' and 'drawarrow' in args:
kernel.init_optimization_svm(svm)
b=svm.get_bias()
w=kernel.get_w()
s=1.0/numpy.dot(w,w)/1.17
pylab.arrow(0,-b/w[1], w[0]*s,s*w[1], width=0.01, fc='#dddddd', ec='k')
grid_features = RealFeatures(numpy.transpose(gridX))
results = svm_test(svm, kernel, features, grid_features)
n = 0
for i in range(len(x)) :
for j in range(len(y)) :
Z[i][j] = results[n]
n += 1
cdict = {'red' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'green':((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
'blue' :((0.0, 0.6, 0.6),(0.5, 0.8, 0.8),(1.0, 1.0, 1.0)),
}
my_cmap = matplotlib.colors.LinearSegmentedColormap('lightgray',cdict,256)
im = pylab.imshow(numpy.transpose(Z),
interpolation='bilinear', origin='lower',
cmap=my_cmap, extent=(xmin,xmax,ymin,ymax) )
if 'decisionboundaryonly' in args:
C1 = pylab.contour(numpy.transpose(Z),
[0],
origin='lower',
linewidths=(3),
colors = ['k'],
extent=(xmin,xmax,ymin,ymax))
else:
C1 = pylab.contour(numpy.transpose(Z),
[-1,0,1],
origin='lower',
linewidths=(1,3,1),
colors = ['k','k'],
extent=(xmin,xmax,ymin,ymax))
pylab.clabel(C1,
inline=1,
fmt='%1.1f',
fontsize=contourFontsize)
# plot the data
lab=labels.get_labels()
vec=numpy.array(vectors)
idx=numpy.where(lab==-1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=300, c='#4444ff', marker='o', alpha=0.8, zorder=100)
idx=numpy.where(lab==+1)[0]
pylab.scatter(vec[idx,0], vec[idx,1], s=500, c='#ff4444', marker='s', alpha=0.8, zorder=100)
# plot SVs
if not 'decisionboundaryonly' in args:
training_outputs = svm_test(svm, kernel, features, features)
sv_idx=numpy.where(abs(training_outputs)<=1.01)[0]
pylab.scatter(vec[sv_idx,0], vec[sv_idx,1], s=100, c='k', marker='o', alpha=0.8, zorder=100)
if 'showmovedpoint' in args:
x=-0.779838709677
y=-0.1375
pylab.scatter([x], [y], s=300, c='#4e4e61', marker='o', alpha=1, zorder=100, edgecolor='#454548')
pylab.arrow(x,y-0.1, 0, -0.8/1.5, width=0.01, fc='#dddddd', ec='k')
#pylab.show()
if title is not None :
pylab.title(title, fontsize=fontsize)
if ylabel:
pylab.ylabel(ylabel,fontsize=fontsize)
if xlabel:
pylab.xlabel(xlabel,fontsize=fontsize)
if showColorbar :
pylab.colorbar(im)
# colormap:
pylab.hot()
if fileName is not None :
pylab.savefig(fileName)
if show :
pylab.show()
def add_percent_ticks():
ticks=pylab.getp(pylab.gca(),'xticks')
ticklabels=len(ticks)*['']
ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), xticklabels=ticklabels)
pylab.setp(pylab.gca(), yticklabels=['0%','100%'])
ticks=pylab.getp(pylab.gca(),'yticks')
ticklabels=len(ticks)*['']
#ticklabels[0]='0%'
ticklabels[-1]='100%'
pylab.setp(pylab.gca(), yticklabels=ticklabels)
xticklabels = pylab.getp(pylab.gca(), 'xticklabels')
yticklabels = pylab.getp(pylab.gca(), 'yticklabels')
pylab.setp(xticklabels, fontsize=fontsize)
pylab.setp(yticklabels, fontsize=fontsize)
def create_figures(extension = 'pdf', directory = '../../tex/figures') :
if extension[0] != '.' :
extension = '.' + extension
dpi=90
# data and linear decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 0.7)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Linear Separation", customwandb=(numpy.array([-0.05, -1.0]), -0.3),
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar, decisionboundaryonly=True)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_linear_classifier' + extension))
pylab.close()
#####################################################################################
# data and svm decision boundary
features,vectors,labels = features_from_file('data/small_gc_toy.data')
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
pylab.figure(figsize=(8,6), dpi=dpi)
decision_boundary_plot(svm, features, vectors, labels, kernel,
fontsize=fontsize, contourFontsize=contourFontsize,
title="Maximum Margin Separation", drawarrow=True,
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
show=False, showColorbar=showColorbar)
add_percent_ticks()
pylab.savefig(os.path.join(directory, 'data_and_svm_classifier' + extension))
pylab.close()
#####################################################################################
# the effect of C on the decision surface:
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(16,6), dpi=dpi)
pylab.subplot(121)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 200)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=200', ylabel="GC Content Before 'AG'",
xlabel="GC Content After 'AG'", fontsize=fontsize,
contourFontsize=contourFontsize, show=False, showmovedpoint=True,
showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(122)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 2)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Soft-Margin with C=2',
ylabel="GC Content Before 'AG'",xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False, showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'effect_of_c' + extension))
pylab.close()
####################################################################################
# playing with nonlinear data:
# the effect of kernel parameters
features,vectors,labels = features_from_file('data/small_gc_toy_outlier.data')
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
kernel = create_kernel('linear', features)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title = 'Linear Kernel',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
kernel = create_kernel('poly', features, 2)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=2',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
kernel = create_kernel('poly', features, 5)
svm = svm_train(kernel, labels, 10)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Polynomial Kernel d=5',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_polynomial' + extension))
pylab.close()
####################################################################################
#effects of sigma
pylab.figure(figsize=(24,6), dpi=dpi)
pylab.subplot(131)
gamma = 0.1
sigma = 20.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=20',
ylabel="GC Content Before 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(132)
sigma = 1.0
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=1',
xlabel="GC Content After 'AG'",
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
pylab.subplot(133)
sigma = 0.05
kernel = create_kernel('gauss', features, sigma)
svm = svm_train(kernel, labels, 100)
decision_boundary_plot(svm, features, vectors, labels, kernel,
title='Gaussian Kernel Sigma=0.05',
fontsize=fontsize, contourFontsize=contourFontsize, show=False,showColorbar=showColorbar)
add_percent_ticks()
#pylab.subplots_adjust(bottom=0.05, top=0.95)
pylab.savefig(os.path.join(directory, 'params_gaussian' + extension))
pylab.close()
####################################################################################
if __name__ == '__main__' :
extension = 'pdf'
if len(sys.argv) > 1 :
extension = sys.argv[1]
pylab.ioff()
create_figures(extension)
| bsd-3-clause |
cimat/data-visualization-patterns | display-patterns/Proportions/Pruebas/A42Ring_Chart_Matplotlib.py | 1 | 2204 | # R/ggplot2 reference version of this ring chart (kept as comments so the Python module parses):
# library(ggplot2)
# t <- table(mtcars$cyl)
# x <- as.data.frame(t)
# colnames(x) <- c("Cylindres", "Frequency")
# bp <- ggplot(x, aes(x = "", y = Frequency, fill = Cylindres)) +
#     geom_bar(width = 1, stat = "identity") + labs(title = "Proportion Cylindres in a Car Distribution")
# pie <- bp + coord_polar("y", start = 0)
# pie + geom_text(aes(y = Frequency/3 + c(0, cumsum(Frequency)[-length(Frequency)]),
#                     label = paste(round(Frequency/sum(Frequency) * 100), " %")), size = 5)
import matplotlib.pyplot as plt
from datos import data
import pandas
import numpy as np
d=data('mtcars')
subset1, subset2, subset3= d[d.cyl==4], d[d.cyl==6], d[d.cyl==8]
ps = pandas.Series([i for i in subset1.gear])
c1 = ps.value_counts()
ps = pandas.Series([i for i in subset2.gear])
c2 = ps.value_counts()
ps = pandas.Series([i for i in subset3.gear])
c3 = ps.value_counts()
def pie(ax, values, **kwargs):
wedges, _, labels = ax.pie(values, autopct='%1.1f%%', **kwargs)
return wedges
fig, ax = plt.subplots()
ax.axis('equal')
width = 0.35
kwargs = dict(colors=[ '#FF9999','#66FF66'], startangle=90)
outside = pie(ax, c3.sort_index(), radius=1, pctdistance=1-width/2, **kwargs)
kwargs = dict(colors=['#FF9999', 'lightskyblue','#66FF66' ], startangle=90)
inside = pie(ax, c2.sort_index(), radius=1-width, pctdistance=1 - (width/2) / (1-width), **kwargs)
center = pie(ax, c1.sort_index(), radius=1-2*width, pctdistance=1 - (width/2) / (1-width), **kwargs)
plt.setp(center+inside + outside, width=width, edgecolor='white')
ax.legend(inside[::-1], ['5 Gear', '4 Gear', '3 Gear'], frameon=False)
kwargs = dict(size=13, color='white', va='center', fontweight='bold')
ax.text(0, 0, '4 Cylindres', ha='center', bbox=dict(boxstyle='round', facecolor='coral', edgecolor='none'),**kwargs)
ax.annotate('8 Cylindres', (0, 0), xytext=(np.radians(-45), 1), bbox=dict(boxstyle='round', facecolor='green', edgecolor='none'),
textcoords='polar', ha='left', **kwargs)
ax.annotate('6 Cylindres', (0, 0), xytext=(np.radians(-20), 0.9), bbox=dict(boxstyle='round', facecolor='blue', edgecolor='none'),
textcoords='polar', ha='right', **kwargs)
plt.title("Gear Car's Distribution by Cylindres", size=18)
plt.show() | cc0-1.0 |
matthewfranglen/spark | python/pyspark/sql/pandas/group_ops.py | 9 | 14192 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since
from pyspark.rdd import PythonEvalType
from pyspark.sql.column import Column
from pyspark.sql.dataframe import DataFrame
class PandasGroupedOpsMixin(object):
"""
Mix-in for pandas grouped operations. Currently, only :class:`GroupedData`
can use this class.
"""
@since(2.3)
def apply(self, udf):
"""
It is an alias of :meth:`pyspark.sql.GroupedData.applyInPandas`; however, it takes a
:meth:`pyspark.sql.functions.pandas_udf` whereas
:meth:`pyspark.sql.GroupedData.applyInPandas` takes a Python native function.
.. note:: It is preferred to use :meth:`pyspark.sql.GroupedData.applyInPandas` over this
API. This API will be deprecated in the future releases.
:param udf: a grouped map user-defined function returned by
:func:`pyspark.sql.functions.pandas_udf`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
# Columns are special because hasattr always return True
if isinstance(udf, Column) or not hasattr(udf, 'func') \
or udf.evalType != PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
raise ValueError("Invalid udf: the udf argument must be a pandas_udf of type "
"GROUPED_MAP.")
warnings.warn(
"It is preferred to use 'applyInPandas' over this "
"API. This API will be deprecated in the future releases. See SPARK-28264 for "
"more details.", UserWarning)
return self.applyInPandas(udf.func, schema=udf.returnType)
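# Migration sketch mirroring the docstring above (illustrative only): the deprecated
# pandas_udf route and the preferred native-function route are equivalent here.
#
#   @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP)   # old style
#   def normalize(pdf):
#       return pdf.assign(v=(pdf.v - pdf.v.mean()) / pdf.v.std())
#   df.groupby("id").apply(normalize)
#
#   def normalize(pdf):                                           # new style
#       return pdf.assign(v=(pdf.v - pdf.v.mean()) / pdf.v.std())
#   df.groupby("id").applyInPandas(normalize, schema="id long, v double")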
@since(3.0)
def applyInPandas(self, func, schema):
"""
Maps each group of the current :class:`DataFrame` using a pandas udf and returns the result
as a `DataFrame`.
The function should take a `pandas.DataFrame` and return another
`pandas.DataFrame`. For each group, all columns are passed together as a `pandas.DataFrame`
to the user-function and the returned `pandas.DataFrame` are combined as a
:class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
:param func: a Python native function that takes a `pandas.DataFrame`, and outputs a
`pandas.DataFrame`.
:param schema: the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, ceil
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").applyInPandas(
... normalize, schema="id long, v double").show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can pass a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> def mean_func(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').applyInPandas(
... mean_func, schema="id long, v double").show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> def sum_func(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).applyInPandas(
... sum_func, schema="id long, `ceil(v / 2)` long, v double").show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: This function requires a full shuffle. All the data of a group will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. note:: Experimental
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
from pyspark.sql import GroupedData
from pyspark.sql.functions import pandas_udf, PandasUDFType
assert isinstance(self, GroupedData)
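        # Wrap the plain Python function as a GROUPED_MAP pandas UDF and apply
        # it to every column of each group via flatMapGroupsInPandas.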
udf = pandas_udf(
func, returnType=schema, functionType=PandasUDFType.GROUPED_MAP)
df = self._df
udf_column = udf(*[df[col] for col in df.columns])
jdf = self._jgd.flatMapGroupsInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
@since(3.0)
def cogroup(self, other):
"""
Cogroups this group with another group so that we can run cogrouped operations.
See :class:`CoGroupedData` for the operations that can be run.
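        A minimal, purely illustrative sketch (``df1`` and ``df2`` are assumed
        to be existing DataFrames that share an ``id`` column, and ``summarize``
        is a hypothetical helper):
        >>> import pandas as pd  # doctest: +SKIP
        >>> def summarize(l, r):
        ...     return pd.DataFrame({"left_rows": [len(l)], "right_rows": [len(r)]})
        >>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
        ...     summarize, schema="left_rows long, right_rows long").show()  # doctest: +SKIP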
"""
from pyspark.sql import GroupedData
assert isinstance(self, GroupedData)
return PandasCogroupedOps(self, other)
class PandasCogroupedOps(object):
"""
A logical grouping of two :class:`GroupedData`,
created by :func:`GroupedData.cogroup`.
.. note:: Experimental
.. versionadded:: 3.0
"""
def __init__(self, gd1, gd2):
self._gd1 = gd1
self._gd2 = gd2
self.sql_ctx = gd1.sql_ctx
@since(3.0)
def applyInPandas(self, func, schema):
"""
Applies a function to each cogroup using pandas and returns the result
as a `DataFrame`.
The function should take two `pandas.DataFrame`\\s and return another
`pandas.DataFrame`. For each side of the cogroup, all columns are passed together as a
`pandas.DataFrame` to the user-function and the returned `pandas.DataFrame` are combined as
a :class:`DataFrame`.
The `schema` should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
:param func: a Python native function that takes two `pandas.DataFrame`\\s, and
outputs a `pandas.DataFrame`, or that takes one tuple (grouping keys) and two
pandas ``DataFrame``s, and outputs a pandas ``DataFrame``.
:param schema: the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.functions import pandas_udf
>>> df1 = spark.createDataFrame(
... [(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
... ("time", "id", "v1"))
>>> df2 = spark.createDataFrame(
... [(20000101, 1, "x"), (20000101, 2, "y")],
... ("time", "id", "v2"))
>>> def asof_join(l, r):
... return pd.merge_asof(l, r, on="time", by="id")
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, schema="time int, id int, v1 double, v2 string"
... ).show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
|20000101| 2|2.0| y|
|20000102| 2|4.0| y|
+--------+---+---+---+
Alternatively, the user can define a function that takes three arguments. In this case,
the grouping key(s) will be passed as the first argument and the data will be passed as the
second and third arguments. The grouping key(s) will be passed as a tuple of numpy data
types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in as two
`pandas.DataFrame` containing all columns from the original Spark DataFrames.
>>> def asof_join(k, l, r):
... if k == (1,):
... return pd.merge_asof(l, r, on="time", by="id")
... else:
... return pd.DataFrame(columns=['time', 'id', 'v1', 'v2'])
>>> df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
... asof_join, "time int, id int, v1 double, v2 string").show() # doctest: +SKIP
+--------+---+---+---+
| time| id| v1| v2|
+--------+---+---+---+
|20000101| 1|1.0| x|
|20000102| 1|3.0| x|
+--------+---+---+---+
.. note:: This function requires a full shuffle. All the data of a cogroup will be loaded
into memory, so the user should be aware of the potential OOM risk if data is skewed
and certain groups are too large to fit in memory.
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. note:: Experimental
.. seealso:: :meth:`pyspark.sql.functions.pandas_udf`
"""
from pyspark.sql.pandas.functions import pandas_udf
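        # Wrap the user function as a COGROUPED_MAP pandas UDF; it receives the
        # columns of both grouped DataFrames via flatMapCoGroupsInPandas.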
udf = pandas_udf(
func, returnType=schema, functionType=PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF)
all_cols = self._extract_cols(self._gd1) + self._extract_cols(self._gd2)
udf_column = udf(*all_cols)
jdf = self._gd1._jgd.flatMapCoGroupsInPandas(self._gd2._jgd, udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
@staticmethod
def _extract_cols(gd):
df = gd._df
return [df[col] for col in df.columns]
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.group_ops
globs = pyspark.sql.pandas.group_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.group tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.group_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| mit |
ericmjl/bokeh | tests/unit/bokeh/document/test_events__document.py | 1 | 20531 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from mock import patch
# Module under test
import bokeh.document.events as bde # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
class FakeEmptyDispatcher(object): pass
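# Records, in order, which dispatch hooks an event invokes, so the tests below
# can assert on the exact dispatch sequence.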
class FakeFullDispatcher(object):
def __init__(self):
self.called = []
def _document_changed(self, event): self.called.append('_document_changed')
def _document_patched(self, event): self.called.append('_document_patched')
def _document_model_changed(self, event): self.called.append('_document_model_changed')
def _column_data_changed(self, event): self.called.append('_column_data_changed')
def _columns_streamed(self, event): self.called.append('_columns_streamed')
def _columns_patched(self, event): self.called.append('_columns_patched')
def _session_callback_added(self, event): self.called.append('_session_callback_added')
def _session_callback_removed(self, event): self.called.append('_session_callback_removed')
class FakeModel(object):
ref = "ref"
data = "data"
def references(self): return dict(ref1=1, ref2=2)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# DocumentChangedEvent --------------------------------------------------------
class TestDocumentChangedEvent(object):
def test_init(self) -> None:
e = bde.DocumentChangedEvent("doc")
assert e.document == "doc"
assert e.setter == None
assert e.callback_invoker == None
e = bde.DocumentChangedEvent("doc", "setter")
assert e.document == "doc"
assert e.setter == "setter"
assert e.callback_invoker == None
e = bde.DocumentChangedEvent("doc", callback_invoker="invoker")
assert e.document == "doc"
assert e.setter == None
assert e.callback_invoker == "invoker"
e = bde.DocumentChangedEvent("doc", "setter", "invoker")
assert e.document == "doc"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_dispatch(self) -> None:
e = bde.DocumentChangedEvent("doc", "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed']
def test_combine_ignores_all(self) -> None:
e = bde.DocumentChangedEvent("doc", "setter", "invoker")
e2 = bde.DocumentChangedEvent("doc", "setter", "invoker")
assert e.combine(e2) == False
# DocumentPatchedEvent --------------------------------------------------------
class TestDocumentPatchedEvent(object):
def test_init(self) -> None:
e = bde.DocumentPatchedEvent("doc", "setter", "invoker")
assert e.document == "doc"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
e = bde.DocumentPatchedEvent("doc", "setter", "invoker")
with pytest.raises(NotImplementedError):
e.generate("refs", "bufs")
def test_dispatch(self) -> None:
e = bde.DocumentPatchedEvent("doc", "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
def test_combine_ignores_all(self) -> None:
e = bde.DocumentPatchedEvent("doc", "setter", "invoker")
e2 = bde.DocumentPatchedEvent("doc", "setter", "invoker")
assert e.combine(e2) == False
# ModelChangedEvent -----------------------------------------------------------
class TestModelChangedEvent(object):
def test_init_defaults(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
assert e.document == "doc"
assert e.setter == None
assert e.callback_invoker == None
assert e.model == "model"
assert e.attr == "attr"
assert e.old == "old"
assert e.new == "new"
assert e.serializable_new == "snew"
assert e.hint == None
assert e.callback_invoker == None
def test_init_ignores_hint_with_setter(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew", setter="setter", hint="hint", callback_invoker="invoker")
assert e.document == "doc"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
assert e.model == "model"
assert e.attr == "attr"
assert e.old == "old"
assert e.new == "new"
assert e.serializable_new == "snew"
assert e.hint == "hint"
assert e.callback_invoker == "invoker"
def test_init_uses_hint_with_no_setter(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew", hint="hint", callback_invoker="invoker")
assert e.document == "doc"
assert e.setter == None
assert e.callback_invoker == "invoker"
assert e.model == "model"
assert e.attr == "attr"
assert e.old == "old"
assert e.new == "new"
assert e.serializable_new == "snew"
assert e.hint == "hint"
assert e.callback_invoker == "invoker"
# TODO (bev) tests for generate
def test_dispatch(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_document_model_changed']
    def test_combine_ignores_except_title_changed_event(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
e2 = bde.DocumentPatchedEvent("doc", "setter", "invoker")
assert e.combine(e2) == False
def test_combine_ignores_different_setter(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew", None, "setter")
e2 = bde.ModelChangedEvent("doc", "model", "attr", "old2", "new2", "snew2", None, "setter2")
assert e.combine(e2) == False
def test_combine_ignores_different_doc(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
e2 = bde.ModelChangedEvent("doc2", "model", "attr", "old2", "new2", "snew2")
assert e.combine(e2) == False
def test_combine_ignores_different_model(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
e2 = bde.ModelChangedEvent("doc", "model2", "attr", "old2", "new2", "snew2")
assert e.combine(e2) == False
def test_combine_ignores_different_attr(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew")
e2 = bde.ModelChangedEvent("doc", "model", "attr2", "old2", "new2", "snew2")
assert e.combine(e2) == False
def test_combine_with_matching_model_changed_event(self) -> None:
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew", callback_invoker="invoker")
e2 = bde.ModelChangedEvent("doc", "model", "attr", "old2", "new2", "snew2", callback_invoker="invoker2")
assert e.combine(e2) == True
assert e.old == "old" # keeps original old value
assert e.new == "new2"
assert e.serializable_new == "snew2"
assert e.callback_invoker == "invoker2"
@patch("bokeh.document.events.ColumnsStreamedEvent.combine")
def test_combine_with_hint_defers(self, mock_combine) -> None:
mock_combine.return_value = False
m = FakeModel()
h = bde.ColumnsStreamedEvent("doc", m, dict(foo=1), 200, "setter", "invoker")
h2 = bde.ColumnsStreamedEvent("doc", m, dict(foo=2), 300, "setter", "invoker")
e = bde.ModelChangedEvent("doc", "model", "attr", "old", "new", "snew", hint=h, callback_invoker="invoker")
e2 = bde.ModelChangedEvent("doc", "model", "attr", "old2", "new2", "snew2", hint=h2, callback_invoker="invoker2")
assert e.combine(e2) == False
assert mock_combine.call_count == 1
assert mock_combine.call_args[0] == (h2,)
assert mock_combine.call_args[1] == {}
# ColumnDataChangedEvent ------------------------------------------------------
class TestColumnDataChangedEvent(object):
def test_init(self) -> None:
m = FakeModel()
e = bde.ColumnDataChangedEvent("doc", m, [1,2], "setter", "invoker")
assert e.document == "doc"
assert e.column_source == m
assert e.cols == [1,2]
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
@patch("bokeh.util.serialization.transform_column_source_data")
def test_generate(self, mock_tcds) -> None:
mock_tcds.return_value = "new"
m = FakeModel()
e = bde.ColumnDataChangedEvent("doc", m, [1,2], "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="ColumnDataChanged", column_source="ref", new="new", cols=[1,2])
assert refs == dict(foo=10)
assert bufs == set()
def test_dispatch(self) -> None:
m = FakeModel()
e = bde.ColumnDataChangedEvent("doc", m, [1,2], "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_column_data_changed']
def test_combine_ignores_all(self) -> None:
m = FakeModel()
e = bde.ColumnDataChangedEvent("doc", m, [1,2], "setter", "invoker")
e2 = bde.ColumnDataChangedEvent("doc", m, [3,4], "setter", "invoker")
assert e.combine(e2) == False
assert e.cols == [1,2]
# ColumnsStreamedEvent --------------------------------------------------------
class TestColumnsStreamedEvent(object):
def test_init(self) -> None:
m = FakeModel()
e = bde.ColumnsStreamedEvent("doc", m, dict(foo=1), 200, "setter", "invoker")
assert e.document == "doc"
assert e.column_source == m
assert e.data == dict(foo=1)
assert e.rollover == 200
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
m = FakeModel()
e = bde.ColumnsStreamedEvent("doc", m, dict(foo=1), 200, "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="ColumnsStreamed", column_source="ref", data=dict(foo=1), rollover=200)
assert refs == dict(foo=10)
assert bufs == set()
def test_dispatch(self) -> None:
m = FakeModel()
e = bde.ColumnsStreamedEvent("doc", m, dict(foo=1), 200, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_columns_streamed']
def test_combine_ignores_all(self) -> None:
m = FakeModel()
e = bde.ColumnsStreamedEvent("doc", m, dict(foo=1), 200, "setter", "invoker")
e2 = bde.ColumnsStreamedEvent("doc", m, dict(foo=2), 300, "setter", "invoker")
assert e.combine(e2) == False
assert e.column_source is m
assert e.data == dict(foo=1)
assert e.rollover == 200
def test_pandas_data(self, pd) -> None:
m = FakeModel()
df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
e = bde.ColumnsStreamedEvent("doc", m, df, 200, "setter", "invoker")
assert isinstance(e.data, dict)
assert e.data == {c: df[c] for c in df.columns}
# ColumnsPatchedEvent ---------------------------------------------------------
class TestColumnsPatchedEvent(object):
def test_init(self) -> None:
m = FakeModel()
e = bde.ColumnsPatchedEvent("doc", m, [1, 2], "setter", "invoker")
assert e.document == "doc"
assert e.column_source == m
assert e.patches == [1, 2]
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
m = FakeModel()
e = bde.ColumnsPatchedEvent("doc", m, [1, 2], "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="ColumnsPatched", column_source="ref", patches=[1,2])
assert refs == dict(foo=10)
assert bufs == set()
def test_dispatch(self) -> None:
m = FakeModel()
e = bde.ColumnsPatchedEvent("doc", m, [1, 2], "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched', '_columns_patched']
def test_combine_ignores_all(self) -> None:
m = FakeModel()
e = bde.ColumnsPatchedEvent("doc", m, [1,2], "setter", "invoker")
e2 = bde.ColumnsPatchedEvent("doc", m, [3,4], "setter", "invoker")
assert e.combine(e2) == False
assert e.patches == [1,2]
# TitleChangedEvent -----------------------------------------------------------
class TestTitleChangedEvent(object):
def test_init(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
assert e.document == "doc"
assert e.title == "title"
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="TitleChanged", title="title")
assert refs == dict(foo=10)
assert bufs == set()
def test_dispatch(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
    def test_combine_ignores_except_title_changed_event(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
e2 = bde.DocumentPatchedEvent("doc", "setter", "invoker")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_setter(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
e2 = bde.TitleChangedEvent("doc", "title2", "setter2", "invoker2")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_ignores_different_doc(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
e2 = bde.TitleChangedEvent("doc2", "title2", "setter2", "invoker2")
assert e.combine(e2) == False
assert e.title == "title"
assert e.callback_invoker == "invoker"
def test_combine_with_title_changed_event(self) -> None:
e = bde.TitleChangedEvent("doc", "title", "setter", "invoker")
e2 = bde.TitleChangedEvent("doc", "title2", "setter", "invoker2")
assert e.combine(e2) == True
assert e.title == "title2"
assert e.callback_invoker == "invoker2"
# RootAddedEvent --------------------------------------------------------------
class TestRootAddedEvent(object):
def test_init(self) -> None:
m = FakeModel()
e = bde.RootAddedEvent("doc", m, "setter", "invoker")
assert e.document == "doc"
assert e.model == m
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
m = FakeModel()
e = bde.RootAddedEvent("doc", m, "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="RootAdded", model="ref")
assert refs == dict(foo=10, ref1=1, ref2=2)
assert bufs == set()
def test_dispatch(self) -> None:
m = FakeModel()
e = bde.RootAddedEvent("doc", m, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
# RootRemovedEvent ------------------------------------------------------------
class TestRootRemovedEvent(object):
def test_init(self) -> None:
m = FakeModel()
e = bde.RootRemovedEvent("doc", m, "setter", "invoker")
assert e.document == "doc"
assert e.model == m
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_generate(self) -> None:
m = FakeModel()
e = bde.RootRemovedEvent("doc", m, "setter", "invoker")
refs = dict(foo=10)
bufs = set()
r = e.generate(refs, bufs)
assert r == dict(kind="RootRemoved", model="ref")
assert refs == dict(foo=10)
assert bufs == set()
def test_dispatch(self) -> None:
m = FakeModel()
e = bde.RootRemovedEvent("doc", m, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
# SessionCallbackAdded --------------------------------------------------------
class TestSessionCallbackAdded(object):
def test_init(self) -> None:
e = bde.SessionCallbackAdded("doc", "callback")
assert e.document == "doc"
assert e.callback == "callback"
assert e.setter == None
assert e.callback_invoker == None
def test_dispatch(self) -> None:
e = bde.SessionCallbackAdded("doc", "callback")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_session_callback_added']
def test_combine_ignores_all(self) -> None:
e = bde.SessionCallbackAdded("doc", "setter")
e2 = bde.SessionCallbackAdded("doc", "setter")
assert e.combine(e2) == False
# SessionCallbackRemoved ------------------------------------------------------
class TestSessionCallbackRemoved(object):
def test_init(self) -> None:
e = bde.SessionCallbackRemoved("doc", "callback")
assert e.document == "doc"
assert e.callback == "callback"
assert e.setter == None
assert e.callback_invoker == None
def test_dispatch(self) -> None:
e = bde.SessionCallbackRemoved("doc", "callback")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_session_callback_removed']
def test_combine_ignores_all(self) -> None:
        e = bde.SessionCallbackRemoved("doc", "setter")
        e2 = bde.SessionCallbackRemoved("doc", "setter")
assert e.combine(e2) == False
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/linear_model/coordinate_descent.py | 13 | 81631 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import _preprocess_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..model_selection import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..exceptions import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
""" Compute the grid of alpha values for elastic net parameter search
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication
y : ndarray, shape (n_samples,)
Target values
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed.
l1_ratio : float
The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1``
        it is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean, default True
Whether to fit an intercept or not
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
"""
n_samples = len(y)
sparse_center = False
if Xy is None:
X_sparse = sparse.isspmatrix(X)
sparse_center = X_sparse and (fit_intercept or normalize)
X = check_array(X, 'csc',
copy=(copy_X and fit_intercept and not X_sparse))
if not X_sparse:
# X can be touched inplace thanks to the above line
X, y, _, _, _ = _preprocess_data(X, y, fit_intercept,
normalize, copy=False)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
if sparse_center:
# Workaround to find alpha_max for sparse matrices.
# since we should not destroy the sparsity of such matrices.
_, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize,
return_mean=True)
mean_dot = X_offset * np.sum(y)
if Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
if sparse_center:
if fit_intercept:
Xy -= mean_dot[:, np.newaxis]
if normalize:
Xy /= X_scale[:, np.newaxis]
alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
(n_samples * l1_ratio))
if alpha_max <= np.finfo(float).resolution:
alphas = np.empty(n_alphas)
alphas.fill(np.finfo(float).resolution)
return alphas
return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False, **params):
"""Compute Lasso path with coordinate descent
The Lasso optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
Target values
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
positive : bool, default False
If set to True, forces coefficients to be positive.
return_n_iter : bool
whether to return the number of iterations or not.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
    Note that in certain cases, the Lars solver may be significantly
    faster at computing the same path. In particular, linear
interpolation can be used to retrieve model coefficients between the
values output by lars_path
Examples
---------
Comparing lasso_path and lars_path with interpolation:
>>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
>>> y = np.array([1, 2, 3.1])
>>> # Use lasso_path to compute a coefficient path
>>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
>>> print(coef_path)
[[ 0. 0. 0.46874778]
[ 0.2159048 0.4425765 0.23689075]]
>>> # Now use lars_path and 1D linear interpolation to compute the
>>> # same path
>>> from sklearn.linear_model import lars_path
>>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
>>> from scipy import interpolate
>>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
... coef_path_lars[:, ::-1])
>>> print(coef_path_continuous([5., 1., .5]))
[[ 0. 0. 0.46915237]
[ 0.2159048 0.4425765 0.23668876]]
See also
--------
lars_path
Lasso
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
alphas=alphas, precompute=precompute, Xy=Xy,
copy_X=copy_X, coef_init=coef_init, verbose=verbose,
positive=positive, return_n_iter=return_n_iter, **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
precompute='auto', Xy=None, copy_X=True, coef_init=None,
verbose=False, return_n_iter=False, positive=False,
check_input=True, **params):
"""Compute elastic net path with coordinate descent
The elastic net optimization function varies for mono and multi-outputs.
For mono-output tasks it is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
For multi-output tasks it is::
(1 / (2 * n_samples)) * ||Y - XW||^Fro_2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as Fortran-contiguous data to avoid
unnecessary memory duplication. If ``y`` is mono-output then ``X``
can be sparse.
y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
Target values
l1_ratio : float, optional
float between 0 and 1 passed to elastic net (scaling between
l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso
eps : float
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``
n_alphas : int, optional
Number of alphas along the regularization path
alphas : ndarray, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
Xy : array-like, optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
coef_init : array, shape (n_features, ) | None
The initial values of the coefficients.
verbose : bool or integer
Amount of verbosity.
params : kwargs
keyword arguments passed to the coordinate descent solver.
return_n_iter : bool
whether to return the number of iterations or not.
positive : bool, default False
If set to True, forces coefficients to be positive.
check_input : bool, default True
        Skip input validation checks, including the Gram matrix when provided,
        assuming they are handled by the caller when check_input=False.
Returns
-------
alphas : array, shape (n_alphas,)
The alphas along the path where models are computed.
coefs : array, shape (n_features, n_alphas) or \
(n_outputs, n_features, n_alphas)
Coefficients along the path.
dual_gaps : array, shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
n_iters : array-like, shape (n_alphas,)
The number of iterations taken by the coordinate descent optimizer to
reach the specified tolerance for each alpha.
(Is returned when ``return_n_iter`` is set to True).
Notes
-----
See examples/linear_model/plot_lasso_coordinate_descent_path.py for an example.
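    Examples
    --------
    A small, illustrative call (only the shape of the returned coefficients is
    shown, since the values themselves depend on the data):
    >>> import numpy as np
    >>> from sklearn.linear_model import enet_path
    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> alphas, coefs, dual_gaps = enet_path(X, y, l1_ratio=0.5, n_alphas=5)
    >>> coefs.shape
    (2, 5)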
See also
--------
MultiTaskElasticNet
MultiTaskElasticNetCV
ElasticNet
ElasticNetCV
"""
# We expect X and y to be already Fortran ordered when bypassing
# checks
if check_input:
X = check_array(X, 'csc', dtype=[np.float64, np.float32],
order='F', copy=copy_X)
y = check_array(y, 'csc', dtype=X.dtype.type, order='F', copy=False,
ensure_2d=False)
if Xy is not None:
# Xy should be a 1d contiguous array or a 2D C ordered array
Xy = check_array(Xy, dtype=X.dtype.type, order='C', copy=False,
ensure_2d=False)
n_samples, n_features = X.shape
multi_output = False
if y.ndim != 1:
multi_output = True
_, n_outputs = y.shape
# MultiTaskElasticNet does not support sparse matrices
if not multi_output and sparse.isspmatrix(X):
if 'X_offset' in params:
# As sparse matrices are not actually centered we need this
# to be passed to the CD solver.
X_sparse_scaling = params['X_offset'] / params['X_scale']
X_sparse_scaling = np.asarray(X_sparse_scaling, dtype=X.dtype)
else:
X_sparse_scaling = np.zeros(n_features, dtype=X.dtype)
# X should be normalized and fit already if function is called
# from ElasticNet.fit
if check_input:
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, Xy, precompute, normalize=False,
fit_intercept=False, copy=False)
if alphas is None:
        # No need to normalize or fit an intercept: that has been done
        # above
alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
fit_intercept=False, eps=eps, n_alphas=n_alphas,
normalize=False, copy_X=False)
else:
alphas = np.sort(alphas)[::-1] # make sure alphas are properly ordered
n_alphas = len(alphas)
tol = params.get('tol', 1e-4)
max_iter = params.get('max_iter', 1000)
dual_gaps = np.empty(n_alphas)
n_iters = []
rng = check_random_state(params.get('random_state', None))
selection = params.get('selection', 'cyclic')
if selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (selection == 'random')
if not multi_output:
coefs = np.empty((n_features, n_alphas), dtype=X.dtype)
else:
coefs = np.empty((n_outputs, n_features, n_alphas),
dtype=X.dtype)
if coef_init is None:
coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1], dtype=X.dtype))
else:
coef_ = np.asfortranarray(coef_init, dtype=X.dtype)
for i, alpha in enumerate(alphas):
l1_reg = alpha * l1_ratio * n_samples
l2_reg = alpha * (1.0 - l1_ratio) * n_samples
if not multi_output and sparse.isspmatrix(X):
model = cd_fast.sparse_enet_coordinate_descent(
coef_, l1_reg, l2_reg, X.data, X.indices,
X.indptr, y, X_sparse_scaling,
max_iter, tol, rng, random, positive)
elif multi_output:
model = cd_fast.enet_coordinate_descent_multi_task(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
elif isinstance(precompute, np.ndarray):
# We expect precompute to be already Fortran ordered when bypassing
# checks
if check_input:
precompute = check_array(precompute, dtype=np.float64,
order='C')
model = cd_fast.enet_coordinate_descent_gram(
coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
tol, rng, random, positive)
elif precompute is False:
model = cd_fast.enet_coordinate_descent(
coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
positive)
else:
raise ValueError("Precompute should be one of True, False, "
"'auto' or array-like. Got %r" % precompute)
coef_, dual_gap_, eps_, n_iter_ = model
coefs[..., i] = coef_
dual_gaps[i] = dual_gap_
n_iters.append(n_iter_)
if dual_gap_ > eps_:
warnings.warn('Objective did not converge.' +
' You might want' +
' to increase the number of iterations.' +
' Fitting data with very small alpha' +
' may cause precision problems.',
ConvergenceWarning)
if verbose:
if verbose > 2:
print(model)
elif verbose > 1:
print('Path: %03i out of %03i' % (i, n_alphas))
else:
sys.stderr.write('.')
if return_n_iter:
return alphas, coefs, dual_gaps, n_iters
return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
"""Linear regression with combined L1 and L2 priors as regularizer.
Minimizes the objective function::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
where::
alpha = a + b and l1_ratio = a / (a + b)
The parameter l1_ratio corresponds to alpha in the glmnet R package while
alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
= 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
unless you supply your own sequence of alpha.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the penalty terms. Defaults to 1.0.
See the notes for the exact mathematical meaning of this
        parameter. ``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
l1_ratio : float
The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. For ``l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
combination of L1 and L2.
fit_intercept : bool
Whether the intercept should be estimated or not. If ``False``, the
data is assumed to be already centered.
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
precompute : True | False | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. The Gram matrix can also be passed as argument.
For sparse input this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Notes
-----
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
SGDRegressor: implements elastic net regression with incremental training.
SGDClassifier: implements logistic regression with elastic net penalty
(``SGDClassifier(loss="log", penalty="elasticnet")``).
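    Examples
    --------
    A minimal, illustrative fit (only the shape of ``coef_`` is shown; the
    fitted values themselves depend on the data):
    >>> from sklearn.linear_model import ElasticNet
    >>> clf = ElasticNet(alpha=0.1, l1_ratio=0.7)
    >>> clf = clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
    >>> clf.coef_.shape
    (2,)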
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, precompute=False, max_iter=1000,
copy_X=True, tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.l1_ratio = l1_ratio
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.positive = positive
self.intercept_ = 0.0
self.random_state = random_state
self.selection = selection
def fit(self, X, y, check_input=True):
"""Fit model with coordinate descent.
Parameters
-----------
X : ndarray or scipy.sparse matrix, (n_samples, n_features)
Data
y : ndarray, shape (n_samples,) or (n_samples, n_targets)
Target
check_input : boolean, (default=True)
            Allows bypassing several input checks.
            Don't use this parameter unless you know what you are doing.
Notes
-----
Coordinate descent is an algorithm that considers each column of
data at a time hence it will automatically convert the X input
as a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
if self.alpha == 0:
warnings.warn("With alpha=0, this algorithm does not converge "
"well. You are advised to use the LinearRegression "
"estimator", stacklevel=2)
if isinstance(self.precompute, six.string_types):
raise ValueError('precompute should be one of True, False or'
' array-like. Got %r' % self.precompute)
# We expect X and y to be float64 or float32 Fortran ordered arrays
# when bypassing checks
if check_input:
X, y = check_X_y(X, y, accept_sparse='csc',
order='F', dtype=[np.float64, np.float32],
copy=self.copy_X and self.fit_intercept,
multi_output=True, y_numeric=True)
y = check_array(y, order='F', copy=False, dtype=X.dtype.type,
ensure_2d=False)
X, y, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X, y, None, self.precompute, self.normalize,
self.fit_intercept, copy=False)
if y.ndim == 1:
y = y[:, np.newaxis]
if Xy is not None and Xy.ndim == 1:
Xy = Xy[:, np.newaxis]
n_samples, n_features = X.shape
n_targets = y.shape[1]
if self.selection not in ['cyclic', 'random']:
raise ValueError("selection should be either random or cyclic.")
if not self.warm_start or self.coef_ is None:
coef_ = np.zeros((n_targets, n_features), dtype=X.dtype,
order='F')
else:
coef_ = self.coef_
if coef_.ndim == 1:
coef_ = coef_[np.newaxis, :]
dual_gaps_ = np.zeros(n_targets, dtype=X.dtype)
self.n_iter_ = []
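        # Fit one coordinate-descent path per target, each restricted to the
        # single requested alpha and warm-started from coef_ when enabled.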
for k in xrange(n_targets):
if Xy is not None:
this_Xy = Xy[:, k]
else:
this_Xy = None
_, this_coef, this_dual_gap, this_iter = \
self.path(X, y[:, k],
l1_ratio=self.l1_ratio, eps=None,
n_alphas=None, alphas=[self.alpha],
precompute=precompute, Xy=this_Xy,
fit_intercept=False, normalize=False, copy_X=True,
verbose=False, tol=self.tol, positive=self.positive,
X_offset=X_offset, X_scale=X_scale, return_n_iter=True,
coef_init=coef_[k], max_iter=self.max_iter,
random_state=self.random_state,
selection=self.selection,
check_input=False)
coef_[k] = this_coef[:, 0]
dual_gaps_[k] = this_dual_gap[0]
self.n_iter_.append(this_iter[0])
if n_targets == 1:
self.n_iter_ = self.n_iter_[0]
self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
self._set_intercept(X_offset, y_offset, X_scale)
# workaround since _set_intercept will cast self.coef_ into float64
self.coef_ = np.asarray(self.coef_, dtype=X.dtype)
# return self for chaining fit and predict calls
return self
@property
def sparse_coef_(self):
""" sparse representation of the fitted ``coef_`` """
return sparse.csr_matrix(self.coef_)
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Decision function of the linear model
Parameters
----------
X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
Returns
-------
T : array, shape (n_samples,)
The predicted decision function
"""
check_is_fitted(self, 'n_iter_')
if sparse.isspmatrix(X):
return safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
else:
return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
"""Linear Model trained with L1 prior as regularizer (aka the Lasso)
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Technically the Lasso model is optimizing the same objective function as
the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1 term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by the :class:`LinearRegression` object. For numerical
reasons, using ``alpha = 0`` with the ``Lasso`` object is not advised.
Given this, you should use the :class:`LinearRegression` object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | array-like, default=False
Whether to use a precomputed Gram matrix to speed up
        calculations. The Gram matrix can also be passed as argument.
        For sparse input
this option is always ``True`` to preserve sparsity.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
(n_targets, n_features)
``sparse_coef_`` is a readonly property derived from ``coef_``
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
n_iter_ : int | array-like, shape (n_targets,)
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, positive=False, precompute=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[ 0.85 0. ]
>>> print(clf.intercept_)
0.15
See also
--------
lars_path
lasso_path
LassoLars
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
precompute=False, copy_X=True, max_iter=1000,
tol=1e-4, warm_start=False, positive=False,
random_state=None, selection='cyclic'):
super(Lasso, self).__init__(
alpha=alpha, l1_ratio=1.0, fit_intercept=fit_intercept,
normalize=normalize, precompute=precompute, copy_X=copy_X,
max_iter=max_iter, tol=tol, warm_start=warm_start,
positive=positive, random_state=random_state,
selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
l1_ratio=1, X_order=None, dtype=None):
"""Returns the MSE for the models computed by 'path'
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
train : list of indices
The indices of the train set
test : list of indices
The indices of the test set
path : callable
function returning a list of models on the path. See
enet_path for an example of signature
path_params : dictionary
Parameters passed to the path function
alphas : array-like, optional
Array of float that is used for cross-validation. If not
provided, computed using 'path'
l1_ratio : float, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
< l1_ratio < 1``, the penalty is a combination of L1 and L2
X_order : {'F', 'C', or None}, optional
The order of the arrays expected by the path function to
avoid memory copies
dtype : a numpy dtype or None
The dtype of the arrays expected by the path function to
avoid memory copies
"""
X_train = X[train]
y_train = y[train]
X_test = X[test]
y_test = y[test]
fit_intercept = path_params['fit_intercept']
normalize = path_params['normalize']
if y.ndim == 1:
precompute = path_params['precompute']
else:
# No Gram variant of multi-task exists right now.
# Fall back to default enet_multitask
precompute = False
X_train, y_train, X_offset, y_offset, X_scale, precompute, Xy = \
_pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
copy=False)
path_params = path_params.copy()
path_params['Xy'] = Xy
path_params['X_offset'] = X_offset
path_params['X_scale'] = X_scale
path_params['precompute'] = precompute
path_params['copy_X'] = False
path_params['alphas'] = alphas
if 'l1_ratio' in path_params:
path_params['l1_ratio'] = l1_ratio
# Do the ordering and type casting here, as if it is done in the path,
# X is copied and a reference is kept here
X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
alphas, coefs, _ = path(X_train, y_train, **path_params)
del X_train, y_train
if y.ndim == 1:
# Doing this so that it becomes coherent with multioutput.
coefs = coefs[np.newaxis, :, :]
y_offset = np.atleast_1d(y_offset)
y_test = y_test[:, np.newaxis]
if normalize:
nonzeros = np.flatnonzero(X_scale)
coefs[:, nonzeros] /= X_scale[nonzeros][:, np.newaxis]
intercepts = y_offset[:, np.newaxis] - np.dot(X_offset, coefs)
if sparse.issparse(X_test):
n_order, n_features, n_alphas = coefs.shape
# Work around for sparse matrices since coefs is a 3-D numpy array.
coefs_feature_major = np.rollaxis(coefs, 1)
feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
X_test_coefs = safe_sparse_dot(X_test, feature_2d)
X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
else:
X_test_coefs = safe_sparse_dot(X_test, coefs)
residues = X_test_coefs - y_test[:, :, np.newaxis]
residues += intercepts
this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
return this_mses
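# Illustrative sketch (not part of the library API) of the quantity that
# _path_residuals computes for a single train/test split: fit a lasso path on
# the (hand-centered) training fold and measure the mean squared error of each
# alpha on the test fold.  The synthetic data, the fixed split and the helper
# name are arbitrary assumptions made for the example.
def _example_manual_path_mse(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(60, 8)
    y = X[:, 0] - 2 * X[:, 1] + 0.1 * rng.randn(60)
    train, test = np.arange(40), np.arange(40, 60)
    # lasso_path does not fit an intercept, so center the training fold by hand
    # and add the offsets back when predicting on the test fold.
    X_offset, y_offset = X[train].mean(axis=0), y[train].mean()
    alphas, coefs, _ = lasso_path(X[train] - X_offset, y[train] - y_offset,
                                  n_alphas=20)
    preds = np.dot(X[test] - X_offset, coefs) + y_offset   # (n_test, n_alphas)
    mses = ((preds - y[test][:, np.newaxis]) ** 2).mean(axis=0)
    return alphas, mses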
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
"""Base class for iterative model fitting along a regularization path"""
@abstractmethod
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.copy_X = copy_X
self.cv = cv
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit linear model with coordinate descent
Fit is on grid of alphas and best alpha estimated by cross-validation.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
Training data. Pass directly as float64, Fortran-contiguous data
to avoid unnecessary memory duplication. If y is mono-output,
X can be sparse.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values
"""
y = np.asarray(y, dtype=np.float64)
if y.shape[0] == 0:
raise ValueError("y has 0 samples: %r" % y)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
if model_str == 'ElasticNet':
model = ElasticNet()
else:
model = Lasso()
if y.ndim > 1 and y.shape[1] > 1:
raise ValueError("For multi-task outputs, use "
"MultiTask%sCV" % (model_str))
y = column_or_1d(y, warn=True)
else:
if sparse.isspmatrix(X):
raise TypeError("X should be dense but a sparse matrix was"
"passed")
elif y.ndim == 1:
raise ValueError("For mono-task outputs, use "
"%sCV" % (model_str))
if model_str == 'ElasticNet':
model = MultiTaskElasticNet()
else:
model = MultiTaskLasso()
if self.selection not in ["random", "cyclic"]:
raise ValueError("selection should be either random or cyclic.")
# This makes sure that there is no duplication in memory.
# Handling copy_X correctly is important in what follows:
# multiple functions touch X and subsamples of X, which can induce
# a lot of memory duplication.
copy_X = self.copy_X and self.fit_intercept
if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
# Keep a reference to X
reference_to_old_X = X
# Let us not impose fortran ordering or float64 so far: it is
# not useful for the cross-validation loop and will be done
# by the model fitting itself
X = check_array(X, 'csc', copy=False)
if sparse.isspmatrix(X):
if (hasattr(reference_to_old_X, "data") and
not np.may_share_memory(reference_to_old_X.data, X.data)):
# X is a sparse matrix and has been copied
copy_X = False
elif not np.may_share_memory(reference_to_old_X, X):
# X has been copied
copy_X = False
del reference_to_old_X
else:
X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
copy_X = False
if X.shape[0] != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (X.shape[0], y.shape[0]))
# All LinearModelCV parameters except 'cv' are acceptable
path_params = self.get_params()
if 'l1_ratio' in path_params:
l1_ratios = np.atleast_1d(path_params['l1_ratio'])
# For the first path, we need to set l1_ratio
path_params['l1_ratio'] = l1_ratios[0]
else:
l1_ratios = [1, ]
path_params.pop('cv', None)
path_params.pop('n_jobs', None)
alphas = self.alphas
n_l1_ratio = len(l1_ratios)
if alphas is None:
alphas = []
for l1_ratio in l1_ratios:
alphas.append(_alpha_grid(
X, y, l1_ratio=l1_ratio,
fit_intercept=self.fit_intercept,
eps=self.eps, n_alphas=self.n_alphas,
normalize=self.normalize,
copy_X=self.copy_X))
else:
# Making sure alphas is properly ordered.
alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
# We want n_alphas to be the number of alphas used for each l1_ratio.
n_alphas = len(alphas[0])
path_params.update({'n_alphas': n_alphas})
path_params['copy_X'] = copy_X
# We are not computing in parallel, we can modify X
# inplace in the folds
if not (self.n_jobs == 1 or self.n_jobs is None):
path_params['copy_X'] = False
# init cross-validation generator
cv = check_cv(self.cv)
# Compute path for all folds and compute MSE to get the best alpha
folds = list(cv.split(X))
best_mse = np.inf
# We fold a double for loop (over l1_ratio values and folds) into a
# single one so that both can be iterated over in parallel
jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
path_params, alphas=this_alphas,
l1_ratio=this_l1_ratio, X_order='F',
dtype=np.float64)
for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
for train, test in folds)
mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(jobs)
mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
mean_mse = np.mean(mse_paths, axis=1)
self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
mean_mse):
i_best_alpha = np.argmin(mse_alphas)
this_best_mse = mse_alphas[i_best_alpha]
if this_best_mse < best_mse:
best_alpha = l1_alphas[i_best_alpha]
best_l1_ratio = l1_ratio
best_mse = this_best_mse
self.l1_ratio_ = best_l1_ratio
self.alpha_ = best_alpha
if self.alphas is None:
self.alphas_ = np.asarray(alphas)
if n_l1_ratio == 1:
self.alphas_ = self.alphas_[0]
# Remove duplicate alphas in case alphas is provided.
else:
self.alphas_ = np.asarray(alphas[0])
# Refit the model with the parameters selected
common_params = dict((name, value)
for name, value in self.get_params().items()
if name in model.get_params())
model.set_params(**common_params)
model.alpha = best_alpha
model.l1_ratio = best_l1_ratio
model.copy_X = copy_X
model.precompute = False
model.fit(X, y)
if not hasattr(self, 'l1_ratio'):
del self.l1_ratio_
self.coef_ = model.coef_
self.intercept_ = model.intercept_
self.dual_gap_ = model.dual_gap_
self.n_iter_ = model.n_iter_
return self
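# Small worked sketch (not part of the library API) of the selection rule used
# at the end of LinearModelCV.fit above: the best (l1_ratio, alpha) pair is the
# one with the smallest mean MSE over the folds.  The grid and the MSE values
# below are made up purely for illustration.
def _example_best_alpha_selection():
    l1_ratios = np.array([0.5, 1.0])
    alphas = np.array([[1.0, 0.5, 0.1, 0.05],
                       [1.0, 0.5, 0.1, 0.05]])
    mean_mse = np.array([[3.0, 2.1, 1.9, 2.4],
                         [2.8, 1.7, 1.5, 1.6]])
    best_mse = np.inf
    for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas, mean_mse):
        i_best_alpha = np.argmin(mse_alphas)
        if mse_alphas[i_best_alpha] < best_mse:
            best_alpha = l1_alphas[i_best_alpha]
            best_l1_ratio = l1_ratio
            best_mse = mse_alphas[i_best_alpha]
    return best_l1_ratio, best_alpha     # 1.0 and 0.1 for this made-up grid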
class LassoCV(LinearModelCV, RegressorMixin):
"""Lasso linear model with iterative fitting along a regularization path
The best model is selected by cross-validation.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path
alphas : numpy array, optional
List of alphas where to compute the models.
If ``None`` alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
If positive, restrict regression coefficients to be positive
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean, default True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
parameter vector (w in the cost function formula)
intercept_ : float | array, shape (n_targets,)
independent term in decision function.
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting
dual_gap_ : ndarray, shape ()
The dual gap at the end of the optimization for the optimal alpha
(``alpha_``).
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
See also
--------
lars_path
lasso_path
LassoLars
Lasso
LassoLarsCV
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
copy_X=True, cv=None, verbose=False, n_jobs=1,
positive=False, random_state=None, selection='cyclic'):
super(LassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
precompute=precompute, max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, positive=positive,
random_state=random_state, selection=selection)
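# Hedged usage sketch (not part of the library API): fit LassoCV on a small
# synthetic problem and inspect what it exposes.  The data, the number of
# alphas and the number of folds are arbitrary assumptions for the example.
def _example_lasso_cv_usage(random_state=0):
    rng = np.random.RandomState(random_state)
    X = np.asfortranarray(rng.randn(80, 12))
    y = X[:, 0] - X[:, 1] + 0.05 * rng.randn(80)
    reg = LassoCV(n_alphas=30, cv=3).fit(X, y)
    # alpha_ is the penalization selected by cross-validation; mse_path_ has
    # one row per alpha on the grid and one column per fold, i.e. (30, 3) here.
    return reg.alpha_, reg.mse_path_.shape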
class ElasticNetCV(LinearModelCV, RegressorMixin):
"""Elastic Net model with iterative fitting along a regularization path
The best model is selected by cross-validation.
Read more in the :ref:`User Guide <elastic_net>`.
Parameters
----------
l1_ratio : float or array of floats, optional
float between 0 and 1 passed to ElasticNet (scaling between
l1 and l2 penalties). For ``l1_ratio = 0``
the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
n_alphas : int, optional
Number of alphas along the regularization path, used for each l1_ratio.
alphas : numpy array, optional
List of alphas where to compute the models.
If None alphas are set automatically
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs.
positive : bool, optional
When set to ``True``, forces the coefficients to be positive.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Attributes
----------
alpha_ : float
The amount of penalization chosen by cross validation
l1_ratio_ : float
The compromise between l1 and l2 penalization chosen by
cross validation
coef_ : array, shape (n_features,) | (n_targets, n_features)
Parameter vector (w in the cost function formula),
intercept_ : float | array, shape (n_targets,)
Independent term in the decision function.
mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
Mean square error for the test set on each fold, varying l1_ratio and
alpha.
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Notes
-----
See examples/linear_model/plot_lasso_model_selection.py
for an example.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
The parameter l1_ratio corresponds to alpha in the glmnet R package
while alpha corresponds to the lambda parameter in glmnet.
More specifically, the optimization objective is::
1 / (2 * n_samples) * ||y - Xw||^2_2
+ alpha * l1_ratio * ||w||_1
+ 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
If you are interested in controlling the L1 and L2 penalty
separately, keep in mind that this is equivalent to::
a * L1 + b * L2
for::
alpha = a + b and l1_ratio = a / (a + b).
See also
--------
enet_path
ElasticNet
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False, precompute='auto',
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, positive=False, random_state=None,
selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.precompute = precompute
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.positive = positive
self.random_state = random_state
self.selection = selection
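# The Notes in the ElasticNetCV docstring relate separate L1 and L2 penalty
# strengths to the (alpha, l1_ratio) parametrization.  The tiny helper below
# is an illustrative sketch of that conversion only; its name and the default
# values of ``a`` and ``b`` are arbitrary.
def _example_l1_l2_to_enet_params(a=0.3, b=0.7):
    """Convert a * L1 + b * L2 into the equivalent (alpha, l1_ratio) pair."""
    alpha = a + b
    l1_ratio = a / (a + b)
    return alpha, l1_ratio     # (1.0, 0.3) for the defaults above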
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
l1_ratio : float
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula). If a 1D y is \
passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
selection='cyclic', tol=0.0001, warm_start=False)
>>> print(clf.coef_)
[[ 0.45663524 0.45612256]
[ 0.45663524 0.45612256]]
>>> print(clf.intercept_)
[ 0.0872422 0.0872422]
See also
--------
ElasticNet, MultiTaskLasso
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
warm_start=False, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.random_state = random_state
self.selection = selection
def fit(self, X, y):
"""Fit MultiTaskLasso model with coordinate descent
Parameters
-----------
X : ndarray, shape (n_samples, n_features)
Data
y : ndarray, shape (n_samples, n_tasks)
Target
Notes
-----
Coordinate descent is an algorithm that considers each column of the
data at a time, hence it will automatically convert the X input
to a Fortran-contiguous numpy array if necessary.
To avoid memory re-allocation it is advised to allocate the
initial data in memory directly using that format.
"""
# X and y must be of type float64
X = check_array(X, dtype=np.float64, order='F',
copy=self.copy_X and self.fit_intercept)
y = check_array(y, dtype=np.float64, ensure_2d=False)
if hasattr(self, 'l1_ratio'):
model_str = 'ElasticNet'
else:
model_str = 'Lasso'
if y.ndim == 1:
raise ValueError("For mono-task outputs, use %s" % model_str)
n_samples, n_features = X.shape
_, n_tasks = y.shape
if n_samples != y.shape[0]:
raise ValueError("X and y have inconsistent dimensions (%d != %d)"
% (n_samples, y.shape[0]))
X, y, X_offset, y_offset, X_scale = _preprocess_data(
X, y, self.fit_intercept, self.normalize, copy=False)
if not self.warm_start or self.coef_ is None:
self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
order='F')
l1_reg = self.alpha * self.l1_ratio * n_samples
l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
self.coef_ = np.asfortranarray(self.coef_) # coef contiguous in memory
if self.selection not in ['random', 'cyclic']:
raise ValueError("selection should be either random or cyclic.")
random = (self.selection == 'random')
self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
cd_fast.enet_coordinate_descent_multi_task(
self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
check_random_state(self.random_state), random)
self._set_intercept(X_offset, y_offset, X_scale)
if self.dual_gap_ > self.eps_:
warnings.warn('Objective did not converge, you might want'
' to increase the number of iterations')
# return self for chaining fit and predict calls
return self
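# Illustrative sketch (not part of the library API) of the L1/L2 mixed norm
# ||W||_21 that appears in the multi-task objectives above: the Euclidean norm
# of each row of W, summed over rows (W laid out as in Y - XW, i.e. one row
# per feature).  The default matrix is an arbitrary example.
def _example_l21_mixed_norm(W=None):
    if W is None:
        W = np.array([[3.0, 4.0],    # row norm 5
                      [0.0, 0.0],    # row norm 0: feature dropped for all tasks
                      [1.0, 0.0]])   # row norm 1
    return np.sqrt((W ** 2).sum(axis=1)).sum()   # 6.0 for the default W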
class MultiTaskLasso(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, optional
Constant that multiplies the L1/L2 term. Defaults to 1.0
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
warm_start : bool, optional
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
coef_ : array, shape (n_tasks, n_features)
parameter vector (W in the cost function formula)
intercept_ : array, shape (n_tasks,)
independent term in decision function.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
normalize=False, random_state=None, selection='cyclic', tol=0.0001,
warm_start=False)
>>> print(clf.coef_)
[[ 0.89393398 0. ]
[ 0.89393398 0. ]]
>>> print(clf.intercept_)
[ 0.10606602 0.10606602]
See also
--------
Lasso, MultiTaskElasticNet
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
random_state=None, selection='cyclic'):
self.alpha = alpha
self.coef_ = None
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
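# Hedged sketch (not part of the library API) of the joint feature selection
# behaviour described above: with the L1/L2 penalty, a feature is kept or
# dropped for all tasks at once, which shows up as all-zero columns of coef_
# (shape (n_tasks, n_features)).  The data and the alpha value are arbitrary,
# so which features survive depends entirely on them.
def _example_multi_task_joint_selection(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(60, 6)
    # Both targets depend on the same two features; the others are pure noise.
    Y = np.column_stack([X[:, 0] + X[:, 1], 2 * X[:, 0] - X[:, 1]])
    Y = Y + 0.01 * rng.randn(60, 2)
    clf = MultiTaskLasso(alpha=0.5).fit(X, Y)
    kept = np.any(clf.coef_ != 0, axis=0)   # boolean mask, one entry per feature
    return kept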
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 ElasticNet with built-in cross-validation.
The optimization objective for MultiTaskElasticNet is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2
+ alpha * l1_ratio * ||W||_21
+ 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
l1_ratio : float or array of floats
The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
For l1_ratio = 1 the penalty is an L1/L2 penalty. For l1_ratio = 0 it
is an L2 penalty.
For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
This parameter can be a list, in which case the different
values are tested by cross-validation and the one giving the best
prediction score is used. Note that a good choice of list of
values for l1_ratio is often to put more values close to 1
(i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
.9, .95, .99, 1]``
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds) or \
(n_l1_ratio, n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
The grid of alphas used for fitting, for each l1_ratio
l1_ratio_ : float
best l1_ratio obtained by cross-validation.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskElasticNetCV()
>>> clf.fit([[0,0], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [2, 2]])
... #doctest: +NORMALIZE_WHITESPACE
MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
n_jobs=1, normalize=False, random_state=None, selection='cyclic',
tol=0.0001, verbose=0)
>>> print(clf.coef_)
[[ 0.52875032 0.46958558]
[ 0.52875032 0.46958558]]
>>> print(clf.intercept_)
[ 0.00166409 0.00166409]
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskLassoCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(enet_path)
def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
fit_intercept=True, normalize=False,
max_iter=1000, tol=1e-4, cv=None, copy_X=True,
verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
self.l1_ratio = l1_ratio
self.eps = eps
self.n_alphas = n_alphas
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.max_iter = max_iter
self.tol = tol
self.cv = cv
self.copy_X = copy_X
self.verbose = verbose
self.n_jobs = n_jobs
self.random_state = random_state
self.selection = selection
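# Hedged usage sketch (not part of the library API): cross-validate a list of
# l1_ratio values together with the alpha grid.  The synthetic data and the
# parameter values are arbitrary assumptions made for the example.
def _example_multi_task_enet_cv_usage(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(50, 5)
    Y = np.column_stack([X[:, 0], X[:, 0] + X[:, 1]]) + 0.05 * rng.randn(50, 2)
    cv_model = MultiTaskElasticNetCV(l1_ratio=[.1, .5, .9], n_alphas=10, cv=3)
    cv_model.fit(X, Y)
    # With several l1_ratio values, mse_path_ has shape
    # (n_l1_ratio, n_alphas, n_folds) = (3, 10, 3) here.
    return cv_model.l1_ratio_, cv_model.alpha_, cv_model.mse_path_.shape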
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
"""Multi-task L1/L2 Lasso with built-in cross-validation.
The optimization objective for MultiTaskLasso is::
(1 / (2 * n_samples)) * ||Y - XW||_Fro^2 + alpha * ||W||_21
Where::
||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
eps : float, optional
Length of the path. ``eps=1e-3`` means that
``alpha_min / alpha_max = 1e-3``.
alphas : array-like, optional
List of alphas where to compute the models.
If not provided, set automatically.
n_alphas : int, optional
Number of alphas along the regularization path
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If ``True``, the regressors X will be normalized before regression.
This parameter is ignored when ``fit_intercept`` is set to ``False``.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
:class:`preprocessing.StandardScaler` before calling ``fit`` on an estimator
with ``normalize=False``.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, optional
The maximum number of iterations.
tol : float, optional
The tolerance for the optimization: if the updates are
smaller than ``tol``, the optimization code checks the
dual gap for optimality and continues until it is smaller
than ``tol``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : bool or integer
Amount of verbosity.
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs. Note that this is used only if multiple values for
l1_ratio are given.
selection : str, default 'cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
random_state : int, RandomState instance, or None (default)
The seed of the pseudo random number generator that selects
a random feature to update. Useful only when selection is set to
'random'.
Attributes
----------
intercept_ : array, shape (n_tasks,)
Independent term in decision function.
coef_ : array, shape (n_tasks, n_features)
Parameter vector (W in the cost function formula).
alpha_ : float
The amount of penalization chosen by cross validation
mse_path_ : array, shape (n_alphas, n_folds)
mean square error for the test set on each fold, varying alpha
alphas_ : numpy array, shape (n_alphas,)
The grid of alphas used for fitting.
n_iter_ : int
number of iterations run by the coordinate descent solver to reach
the specified tolerance for the optimal alpha.
See also
--------
MultiTaskElasticNet
ElasticNetCV
MultiTaskElasticNetCV
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X argument of the fit method
should be directly passed as a Fortran-contiguous numpy array.
"""
path = staticmethod(lasso_path)
def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
cv=None, verbose=False, n_jobs=1, random_state=None,
selection='cyclic'):
super(MultiTaskLassoCV, self).__init__(
eps=eps, n_alphas=n_alphas, alphas=alphas,
fit_intercept=fit_intercept, normalize=normalize,
max_iter=max_iter, tol=tol, copy_X=copy_X,
cv=cv, verbose=verbose, n_jobs=n_jobs, random_state=random_state,
selection=selection)
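# Hedged usage sketch (not part of the library API) for MultiTaskLassoCV, whose
# docstring above has no Examples section: fit it on a small synthetic
# multi-output problem and read off the selected alpha.  The data and the
# parameter values are arbitrary assumptions made for the example.
def _example_multi_task_lasso_cv_usage(random_state=0):
    rng = np.random.RandomState(random_state)
    X = rng.randn(40, 4)
    Y = np.column_stack([X[:, 0], -X[:, 0]]) + 0.05 * rng.randn(40, 2)
    cv_model = MultiTaskLassoCV(n_alphas=15, cv=3).fit(X, Y)
    # alpha_ is the penalization selected on the (n_alphas, n_folds) MSE grid,
    # i.e. mse_path_.shape == (15, 3) here.
    return cv_model.alpha_, cv_model.mse_path_.shape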
| bsd-3-clause |