Dataset schema (each record below repeats these fields in this order):

    repo_name   string, 7-60 characters
    path        string, 6-134 characters
    copies      string, 1-3 characters
    size        string, 4-6 characters
    content     string, 1.04k-149k characters
    license     string, one of 12 license classes
alexei-matveev/ase-local
doc/exercises/siesta1/answer1.py
3
1197
# -*- coding: utf-8 -*-
# creates: ener.png distance.png angle.png
import os
import matplotlib
matplotlib.use('Agg')
import pylab as plt

e_s = [0.01, 0.1, 0.2, 0.3, 0.4, 0.5]
E = [-463.2160, -462.9633, -462.4891, -462.0551, -461.5426, -461.1714]
d = [1.1131, 1.1046, 1.0960, 1.0901, 1.0857, 1.0810]
alpha = [100.832453365, 99.568214268, 99.1486065462, 98.873671379,
         98.1726341945, 98.0535643778]

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.29, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, E, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'Energy [eV]')
plt.title('Total Energy vs Eshift')
plt.savefig('ener.png')

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.24, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, d, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'O-H distance [Å]')
limits = plt.axis('tight')
plt.title('O-H distance vs Eshift')
plt.savefig('distance.png')

fig = plt.figure(figsize=(3, 2.5))
fig.subplots_adjust(left=.26, right=.96, top=.9, bottom=0.16)
plt.plot(e_s, alpha, 'o-')
plt.xlabel(u'Energy shift [eV]')
plt.ylabel(u'H2O angle')
limits = plt.axis('tight')
plt.title('H2O angle vs Eshift')
plt.savefig('angle.png')
gpl-2.0
nmayorov/scikit-learn
examples/plot_multilabel.py
236
4157
# Authors: Vlad Niculae, Mathieu Blondel # License: BSD 3 clause """ ========================= Multilabel classification ========================= This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles. The classification is performed by projecting to the first two principal components found by PCA and CCA for visualisation purposes, followed by using the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one. Note: in the plot, "unlabeled samples" does not mean that we don't know the labels (as in semi-supervised learning) but that the samples simply do *not* have a label. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_multilabel_classification from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.preprocessing import LabelBinarizer from sklearn.decomposition import PCA from sklearn.cross_decomposition import CCA def plot_hyperplane(clf, min_x, max_x, linestyle, label): # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough yy = a * xx - (clf.intercept_[0]) / w[1] plt.plot(xx, yy, linestyle, label=label) def plot_subfigure(X, Y, subplot, title, transform): if transform == "pca": X = PCA(n_components=2).fit_transform(X) elif transform == "cca": X = CCA(n_components=2).fit(X, Y).transform(X) else: raise ValueError min_x = np.min(X[:, 0]) max_x = np.max(X[:, 0]) min_y = np.min(X[:, 1]) max_y = np.max(X[:, 1]) classif = OneVsRestClassifier(SVC(kernel='linear')) classif.fit(X, Y) plt.subplot(2, 2, subplot) plt.title(title) zero_class = np.where(Y[:, 0]) one_class = np.where(Y[:, 1]) plt.scatter(X[:, 0], X[:, 1], s=40, c='gray') plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b', facecolors='none', linewidths=2, label='Class 1') plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange', facecolors='none', linewidths=2, label='Class 2') plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--', 'Boundary\nfor class 1') plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.', 'Boundary\nfor class 2') plt.xticks(()) plt.yticks(()) plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x) plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y) if subplot == 2: plt.xlabel('First principal component') plt.ylabel('Second principal component') plt.legend(loc="upper left") plt.figure(figsize=(8, 6)) X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=True, random_state=1) plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca") X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, random_state=1) plot_subfigure(X, 
Y, 3, "Without unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca") plt.subplots_adjust(.04, .02, .97, .94, .09, .2) plt.show()
bsd-3-clause
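The plot_hyperplane helper in the record above turns a fitted linear SVC into a boundary line via y = -(w0/w1)·x - intercept/w1. A minimal standalone sketch of the same algebra, using hypothetical toy data rather than the generated multilabel set:

import numpy as np
from sklearn.svm import SVC

# Hypothetical toy data: two linearly separable blobs.
rng = np.random.RandomState(0)
X = np.r_[rng.randn(20, 2) - 2, rng.randn(20, 2) + 2]
y = np.r_[np.zeros(20), np.ones(20)]

clf = SVC(kernel='linear').fit(X, y)
w = clf.coef_[0]                       # normal vector of the separating hyperplane
slope = -w[0] / w[1]                   # rewrite w0*x + w1*y + b = 0 as y = slope*x + intercept
intercept = -clf.intercept_[0] / w[1]
xx = np.linspace(X[:, 0].min(), X[:, 0].max())
yy = slope * xx + intercept            # points on the decision boundary, ready to plot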
paulgradie/SeqPyPlot
main_app/seqpyplot/parsers/htseq_parser.py
1
2244
""" Read a directory of expression counts in ht-seq format. Each sample should be an individual file in the directory. File names and sample order are specified in the config file (order is determined by order IN the config.) This class is intended to return the raw dataframe of samples with missing sample columns as NaN. """ import pandas as pd from pathos.multiprocessing import ProcessPool import pathlib try: from functools import reduce # for py3 compatibility except ImportError: pass class HtSeqParser(object): def __init__(self, nodes=2): self.nodes = nodes def parse_data(self, data_paths, sample_names): """ Read the input files from the config file and load in to a pandas dataframe. params data_paths: list of file paths specified in the config. Returned from config parse sample_names: list of sample names specified in the config returned from config parse """ output = self.load_data(data_paths, sample_names) data, ercc_df = (self.merge_dfs(output) .pipe(self.df_cleanup) .pipe(self.split_on_ercc)) return data, ercc_df def load_data(self, data_paths, sample_names): " Multiprocess load of files in to a list of dfs " pool = ProcessPool(nodes=self.nodes) dfs = pool.map(self.load_func, zip(data_paths, sample_names)) return dfs @staticmethod def load_func(data_tuple): path, sample_name = data_tuple return pd.read_csv(path, sep='\t', names=['gene', sample_name]) def merge_dfs(self, dfs): return reduce(lambda x, y: pd.merge(x, y, on='gene', how='outer'), dfs) def df_cleanup(self, df_old): " Clean away unwanted columns, reset index, and fillna " df = df_old.copy() df = df[df['gene'].str.startswith('__') == False] df.set_index('gene', inplace=True) df.fillna(value='Nan', inplace=True) return df def split_on_ercc(self, df): " Extract the ERCC data " ercc_cols = df.index.str.startswith('ERCC-') ercc_df = df[ercc_cols] data = df[~ercc_cols] return data, ercc_df
gpl-3.0
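A brief usage sketch for the HtSeqParser record above. The import path, file names, and sample names are assumptions for illustration; in the real pipeline both lists come from the config parser:

from seqpyplot.parsers.htseq_parser import HtSeqParser  # import path assumed from the repo layout

# Hypothetical ht-seq count files and sample names; their order must match.
data_paths = ['counts/ctrl_d1.txt', 'counts/treat_d1.txt']
sample_names = ['ctrl_d1', 'treat_d1']

parser = HtSeqParser(nodes=2)
data, ercc_df = parser.parse_data(data_paths, sample_names)
# `data` holds gene-level counts indexed by gene ID; `ercc_df` holds the ERCC spike-in rows.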
mattilyra/scikit-learn
sklearn/tests/test_pipeline.py
23
15392
""" Test the pipeline module. """ import numpy as np from scipy import sparse from sklearn.externals.six.moves import zip from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns_message from sklearn.base import clone from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LinearRegression from sklearn.cluster import KMeans from sklearn.feature_selection import SelectKBest, f_classif from sklearn.decomposition import PCA, TruncatedSVD from sklearn.datasets import load_iris from sklearn.preprocessing import StandardScaler from sklearn.feature_extraction.text import CountVectorizer JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) class IncorrectT(object): """Small class to test parameter dispatching. """ def __init__(self, a=None, b=None): self.a = a self.b = b class T(IncorrectT): def fit(self, X, y): return self def get_params(self, deep=False): return {'a': self.a, 'b': self.b} def set_params(self, **params): self.a = params['a'] return self class TransfT(T): def transform(self, X, y=None): return X def inverse_transform(self, X): return X class FitParamT(object): """Mock classifier """ def __init__(self): self.successful = False def fit(self, X, y, should_succeed=False): self.successful = should_succeed def predict(self, X): return self.successful def test_pipeline_init(): # Test the various init parameters of the pipeline. 
assert_raises(TypeError, Pipeline) # Check that we can't instantiate pipelines with objects without fit # method pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)]) # Smoke test with only an estimator clf = T() pipe = Pipeline([('svc', clf)]) assert_equal(pipe.get_params(deep=True), dict(svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False))) # Check that params are set pipe.set_params(svc__a=0.1) assert_equal(clf.a, 0.1) assert_equal(clf.b, None) # Smoke test the repr: repr(pipe) # Test with two objects clf = SVC() filter1 = SelectKBest(f_classif) pipe = Pipeline([('anova', filter1), ('svc', clf)]) # Check that we can't use the same stage name twice assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())]) # Check that params are set pipe.set_params(svc__C=0.1) assert_equal(clf.C, 0.1) # Smoke test the repr: repr(pipe) # Check that params are not set when naming them wrong assert_raises(ValueError, pipe.set_params, anova__C=0.1) # Test clone pipe2 = clone(pipe) assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc']) # Check that apart from estimators, the parameters are the same params = pipe.get_params(deep=True) params2 = pipe2.get_params(deep=True) for x in pipe.get_params(deep=False): params.pop(x) for x in pipe2.get_params(deep=False): params2.pop(x) # Remove estimators that where copied params.pop('svc') params.pop('anova') params2.pop('svc') params2.pop('anova') assert_equal(params, params2) def test_pipeline_methods_anova(): # Test the various methods of the pipeline (anova). iris = load_iris() X = iris.data y = iris.target # Test with Anova + LogisticRegression clf = LogisticRegression() filter1 = SelectKBest(f_classif, k=2) pipe = Pipeline([('anova', filter1), ('logistic', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_fit_params(): # Test that the pipeline can take fit parameters pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())]) pipe.fit(X=None, y=None, clf__should_succeed=True) # classifier should return True assert_true(pipe.predict(None)) # and transformer params should not be changed assert_true(pipe.named_steps['transf'].a is None) assert_true(pipe.named_steps['transf'].b is None) def test_pipeline_raise_set_params_error(): # Test pipeline raises set params error message for nested models. pipe = Pipeline([('cls', LinearRegression())]) # expected error message error_msg = ('Invalid parameter %s for estimator %s. ' 'Check the list of available parameters ' 'with `estimator.get_params().keys()`.') assert_raise_message(ValueError, error_msg % ('fake', 'Pipeline'), pipe.set_params, fake='nope') # nested model check assert_raise_message(ValueError, error_msg % ("fake", pipe), pipe.set_params, fake__estimator='nope') def test_pipeline_methods_pca_svm(): # Test the various methods of the pipeline (pca + svm). iris = load_iris() X = iris.data y = iris.target # Test with PCA + SVC clf = SVC(probability=True, random_state=0) pca = PCA(svd_solver='full', n_components='mle', whiten=True) pipe = Pipeline([('pca', pca), ('svc', clf)]) pipe.fit(X, y) pipe.predict(X) pipe.predict_proba(X) pipe.predict_log_proba(X) pipe.score(X, y) def test_pipeline_methods_preprocessing_svm(): # Test the various methods of the pipeline (preprocessing + svm). 
iris = load_iris() X = iris.data y = iris.target n_samples = X.shape[0] n_classes = len(np.unique(y)) scaler = StandardScaler() pca = PCA(n_components=2, svd_solver='randomized', whiten=True) clf = SVC(probability=True, random_state=0, decision_function_shape='ovr') for preprocessing in [scaler, pca]: pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)]) pipe.fit(X, y) # check shapes of various prediction functions predict = pipe.predict(X) assert_equal(predict.shape, (n_samples,)) proba = pipe.predict_proba(X) assert_equal(proba.shape, (n_samples, n_classes)) log_proba = pipe.predict_log_proba(X) assert_equal(log_proba.shape, (n_samples, n_classes)) decision_function = pipe.decision_function(X) assert_equal(decision_function.shape, (n_samples, n_classes)) pipe.score(X, y) def test_fit_predict_on_pipeline(): # test that the fit_predict method is implemented on a pipeline # test that the fit_predict on pipeline yields same results as applying # transform and clustering steps separately iris = load_iris() scaler = StandardScaler() km = KMeans(random_state=0) # first compute the transform and clustering step separately scaled = scaler.fit_transform(iris.data) separate_pred = km.fit_predict(scaled) # use a pipeline to do the transform and clustering in one step pipe = Pipeline([('scaler', scaler), ('Kmeans', km)]) pipeline_pred = pipe.fit_predict(iris.data) assert_array_almost_equal(pipeline_pred, separate_pred) def test_fit_predict_on_pipeline_without_fit_predict(): # tests that a pipeline does not have fit_predict method when final # step of pipeline does not have fit_predict defined scaler = StandardScaler() pca = PCA(svd_solver='full') pipe = Pipeline([('scaler', scaler), ('pca', pca)]) assert_raises_regex(AttributeError, "'PCA' object has no attribute 'fit_predict'", getattr, pipe, 'fit_predict') def test_feature_union(): # basic sanity check for feature union iris = load_iris() X = iris.data X -= X.mean(axis=0) y = iris.target svd = TruncatedSVD(n_components=2, random_state=0) select = SelectKBest(k=1) fs = FeatureUnion([("svd", svd), ("select", select)]) fs.fit(X, y) X_transformed = fs.transform(X) assert_equal(X_transformed.shape, (X.shape[0], 3)) # check if it does the expected thing assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) # test if it also works for sparse input # We use a different svd object to control the random_state stream fs = FeatureUnion([("svd", svd), ("select", select)]) X_sp = sparse.csr_matrix(X) X_sp_transformed = fs.fit_transform(X_sp, y) assert_array_almost_equal(X_transformed, X_sp_transformed.toarray()) # test setting parameters fs.set_params(select__k=2) assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4)) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)]) X_transformed = fs.fit_transform(X, y) assert_equal(X_transformed.shape, (X.shape[0], 8)) def test_make_union(): pca = PCA(svd_solver='full') mock = TransfT() fu = make_union(pca, mock) names, transformers = zip(*fu.transformer_list) assert_equal(names, ("pca", "transft")) assert_equal(transformers, (pca, mock)) def test_pipeline_transform(): # Test whether pipeline works with a transformer at the end. 
# Also test pipeline.transform and pipeline.inverse_transform iris = load_iris() X = iris.data pca = PCA(n_components=2, svd_solver='full') pipeline = Pipeline([('pca', pca)]) # test transform and fit_transform: X_trans = pipeline.fit(X).transform(X) X_trans2 = pipeline.fit_transform(X) X_trans3 = pca.fit_transform(X) assert_array_almost_equal(X_trans, X_trans2) assert_array_almost_equal(X_trans, X_trans3) X_back = pipeline.inverse_transform(X_trans) X_back2 = pca.inverse_transform(X_trans) assert_array_almost_equal(X_back, X_back2) def test_pipeline_fit_transform(): # Test whether pipeline works with a transformer missing fit_transform iris = load_iris() X = iris.data y = iris.target transft = TransfT() pipeline = Pipeline([('mock', transft)]) # test fit_transform: X_trans = pipeline.fit_transform(X, y) X_trans2 = transft.fit(X, y).transform(X) assert_array_almost_equal(X_trans, X_trans2) def test_make_pipeline(): t1 = TransfT() t2 = TransfT() pipe = make_pipeline(t1, t2) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transft-1") assert_equal(pipe.steps[1][0], "transft-2") pipe = make_pipeline(t1, t2, FitParamT()) assert_true(isinstance(pipe, Pipeline)) assert_equal(pipe.steps[0][0], "transft-1") assert_equal(pipe.steps[1][0], "transft-2") assert_equal(pipe.steps[2][0], "fitparamt") def test_feature_union_weights(): # test feature union with transformer weights iris = load_iris() X = iris.data y = iris.target pca = PCA(n_components=2, svd_solver='randomized', random_state=0) select = SelectKBest(k=1) # test using fit followed by transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) fs.fit(X, y) X_transformed = fs.transform(X) # test using fit_transform fs = FeatureUnion([("pca", pca), ("select", select)], transformer_weights={"pca": 10}) X_fit_transformed = fs.fit_transform(X, y) # test it works with transformers missing fit_transform fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)], transformer_weights={"mock": 10}) X_fit_transformed_wo_method = fs.fit_transform(X, y) # check against expected result # We use a different pca object to control the random_state stream assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_array_almost_equal(X_fit_transformed[:, :-1], 10 * pca.fit_transform(X)) assert_array_equal(X_fit_transformed[:, -1], select.fit_transform(X, y).ravel()) assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7)) def test_feature_union_parallel(): # test that n_jobs work for FeatureUnion X = JUNK_FOOD_DOCS fs = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ]) fs_parallel = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs_parallel2 = FeatureUnion([ ("words", CountVectorizer(analyzer='word')), ("chars", CountVectorizer(analyzer='char')), ], n_jobs=2) fs.fit(X) X_transformed = fs.transform(X) assert_equal(X_transformed.shape[0], len(X)) fs_parallel.fit(X) X_transformed_parallel = fs_parallel.transform(X) assert_equal(X_transformed.shape, X_transformed_parallel.shape) assert_array_equal( X_transformed.toarray(), X_transformed_parallel.toarray() ) # fit_transform should behave the same X_transformed_parallel2 = fs_parallel2.fit_transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) # transformers should stay 
fit after fit_transform X_transformed_parallel2 = fs_parallel2.transform(X) assert_array_equal( X_transformed.toarray(), X_transformed_parallel2.toarray() ) def test_feature_union_feature_names(): word_vect = CountVectorizer(analyzer="word") char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3)) ft = FeatureUnion([("chars", char_vect), ("words", word_vect)]) ft.fit(JUNK_FOOD_DOCS) feature_names = ft.get_feature_names() for feat in feature_names: assert_true("chars__" in feat or "words__" in feat) assert_equal(len(feature_names), 35) def test_classes_property(): iris = load_iris() X = iris.data y = iris.target reg = make_pipeline(SelectKBest(k=1), LinearRegression()) reg.fit(X, y) assert_raises(AttributeError, getattr, reg, "classes_") clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0)) assert_raises(AttributeError, getattr, clf, "classes_") clf.fit(X, y) assert_array_equal(clf.classes_, np.unique(y)) def test_X1d_inverse_transform(): transformer = TransfT() pipeline = make_pipeline(transformer) X = np.ones(10) msg = "1d X will not be reshaped in pipeline.inverse_transform" assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
bsd-3-clause
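The test module above exercises Pipeline and FeatureUnion end to end. For orientation, a minimal sketch of the chaining behaviour those tests verify (standard scikit-learn API, not part of the test file):

from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.svm import SVC

iris = load_iris()
# fit() chains PCA.fit_transform into SVC.fit; predict()/score() chain transform into the SVC.
pipe = make_pipeline(PCA(n_components=2), SVC())
pipe.set_params(svc__C=10.0)           # nested parameters use the '<step>__<param>' convention
pipe.fit(iris.data, iris.target)
print(pipe.named_steps['svc'].C)       # steps are addressable by their lowercased class name
print(pipe.score(iris.data, iris.target))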
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/sklearn/neighbors/graph.py
208
7031
"""Nearest Neighbors graph functions""" # Author: Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import warnings from .base import KNeighborsMixin, RadiusNeighborsMixin from .unsupervised import NearestNeighbors def _check_params(X, metric, p, metric_params): """Check the validity of the input parameters""" params = zip(['metric', 'p', 'metric_params'], [metric, p, metric_params]) est_params = X.get_params() for param_name, func_param in params: if func_param != est_params[param_name]: raise ValueError( "Got %s for %s, while the estimator has %s for " "the same parameter." % ( func_param, param_name, est_params[param_name])) def _query_include_self(X, include_self, mode): """Return the query based on include_self param""" # Done to preserve backward compatibility. if include_self is None: if mode == "connectivity": warnings.warn( "The behavior of 'kneighbors_graph' when mode='connectivity' " "will change in version 0.18. Presently, the nearest neighbor " "of each sample is the sample itself. Beginning in version " "0.18, the default behavior will be to exclude each sample " "from being its own nearest neighbor. To maintain the current " "behavior, set include_self=True.", DeprecationWarning) include_self = True else: include_self = False if include_self: query = X._fit_X else: query = None return query def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=None): """Computes the (weighted) graph of k-Neighbors for points in X Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. metric : string, default 'minkowski' The distance metric used to calculate the k-Neighbors for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the p param equal to 2.) include_self: bool, default backward-compatible. Whether or not to mark each sample as the first nearest neighbor to itself. If `None`, then True is used for mode='connectivity' and False for mode='distance' as this will preserve backwards compatibilty. From version 0.18, the default value will be False, irrespective of the value of `mode`. p : int, default 2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional additional keyword arguments for the metric function. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. 
Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 1.], [ 1., 0., 1.]]) See also -------- radius_neighbors_graph """ if not isinstance(X, KNeighborsMixin): X = NearestNeighbors(n_neighbors, metric=metric, p=p, metric_params=metric_params).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X, include_self, mode) return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode) def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski', p=2, metric_params=None, include_self=None): """Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Read more in the :ref:`User Guide <unsupervised_neighbors>`. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. radius : float Radius of neighborhoods. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. metric : string, default 'minkowski' The distance metric used to calculate the neighbors within a given radius for each sample point. The DistanceMetric class gives a list of available metrics. The default distance is 'euclidean' ('minkowski' metric with the param equal to 2.) include_self: bool, default None Whether or not to mark each sample as the first nearest neighbor to itself. If `None`, then True is used for mode='connectivity' and False for mode='distance' as this will preserve backwards compatibilty. From version 0.18, the default value will be False, irrespective of the value of `mode`. p : int, default 2 Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params: dict, optional additional keyword arguments for the metric function. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import radius_neighbors_graph >>> A = radius_neighbors_graph(X, 1.5) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 0.], [ 1., 0., 1.]]) See also -------- kneighbors_graph """ if not isinstance(X, RadiusNeighborsMixin): X = NearestNeighbors(radius=radius, metric=metric, p=p, metric_params=metric_params).fit(X) else: _check_params(X, metric, p, metric_params) query = _query_include_self(X, include_self, mode) return X.radius_neighbors_graph(query, radius, mode)
gpl-2.0
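Besides the doctests embedded in the record above, kneighbors_graph also accepts an already-fitted estimator, in which case _check_params only verifies that the metric arguments agree. A small sketch, assuming the scikit-learn version this vendored copy ships with:

from sklearn.neighbors import NearestNeighbors, kneighbors_graph

X = [[0], [3], [1]]
# Passing a fitted NearestNeighbors reuses its index instead of refitting;
# include_self=True keeps the pre-0.18 behaviour and silences the deprecation warning.
nn = NearestNeighbors(n_neighbors=2).fit(X)
A = kneighbors_graph(nn, n_neighbors=2, mode='connectivity', include_self=True)
print(A.toarray())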
poryfly/scikit-learn
sklearn/cross_decomposition/cca_.py
209
3150
from .pls_ import _PLS __all__ = ['CCA'] class CCA(_PLS): """CCA Canonical Correlation Analysis. CCA inherits from PLS with mode="B" and deflation_mode="canonical". Read more in the :ref:`User Guide <cross_decomposition>`. Parameters ---------- n_components : int, (default 2). number of components to keep. scale : boolean, (default True) whether to scale the data? max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop tol : non-negative real, default 1e-06. the tolerance used in the iterative algorithm copy : boolean Whether the deflation be done on a copy. Let the default value to True unless you don't care about side effects Attributes ---------- x_weights_ : array, [p, n_components] X block weights vectors. y_weights_ : array, [q, n_components] Y block weights vectors. x_loadings_ : array, [p, n_components] X block loadings vectors. y_loadings_ : array, [q, n_components] Y block loadings vectors. x_scores_ : array, [n_samples, n_components] X scores. y_scores_ : array, [n_samples, n_components] Y scores. x_rotations_ : array, [p, n_components] X block to latents rotations. y_rotations_ : array, [q, n_components] Y block to latents rotations. n_iter_ : array-like Number of iterations of the NIPALS inner loop for each component. Notes ----- For each component k, find the weights u, v that maximizes max corr(Xk u, Yk v), such that ``|u| = |v| = 1`` Note that it maximizes only the correlations between the scores. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current Y score. Examples -------- >>> from sklearn.cross_decomposition import CCA >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> cca = CCA(n_components=1) >>> cca.fit(X, Y) ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06) >>> X_c, Y_c = cca.transform(X, Y) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In french but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. See also -------- PLSCanonical PLSSVD """ def __init__(self, n_components=2, scale=True, max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="canonical", mode="B", norm_y_weights=True, algorithm="nipals", max_iter=max_iter, tol=tol, copy=copy)
bsd-3-clause
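A short sketch extending the doctest in the CCA record above: after fitting, the paired canonical scores returned by transform are maximally correlated, which can be checked directly:

import numpy as np
from sklearn.cross_decomposition import CCA

X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]

cca = CCA(n_components=1).fit(X, Y)
X_c, Y_c = cca.transform(X, Y)
# Correlation between the paired canonical scores (close to 1 for this toy data).
print(np.corrcoef(X_c.ravel(), Y_c.ravel())[0, 1])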
ansobolev/regCMPostProc
src/plot.py
1
2816
#!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature

from value import Value


class Plotter(object):

    def __init__(self, value):
        self._value = value
        self.lat, self.lon = value.latlon

    def plot(self, coastlines=True, countries=True, places=True, title=None, levels=None):
        if levels is not None:
            l_min, l_max = levels
            l = (l_max - l_min) / 10
            levels = range(l_min, l_max + l, l)
        projection = ccrs.PlateCarree()
        self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
        if coastlines:
            self.ax.coastlines('10m')
        if countries:
            countries = cfeature.NaturalEarthFeature(
                scale='110m', category='cultural', name='admin_0_countries')
            self.ax.add_feature(countries, color='r', alpha=0.1)
        if places:
            places = cfeature.NaturalEarthFeature(
                scale='110m', category='cultural', name='populated_places')
            self.ax.add_feature(places, color='b', hatch='o')
        cx = self.ax.contourf(self.lon, self.lat, self._value.data,
                              transform=ccrs.PlateCarree(), cmap='bwr', levels=levels)
        # To mask out OCEAN or LAND
        #ax.add_feature(cfeature.OCEAN)
        #ax.add_feature(cfeature.LAND)
        self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                          linewidth=1, color='blue', alpha=0.5, linestyle='-')
        self.fig.colorbar(cx)
        times = self._value.limits['time']
        plt.title(self._value.title + ' [' + self._value.units + ']\n' +
                  'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')

    def show(self):
        plt.show()

    def save(self, filename, format):
        plt.savefig(filename + '.' + format)

    def close(self):
        plt.close(self.fig)


if __name__ == "__main__":
    pass
gpl-3.0
Reagankm/KnockKnock
venv/lib/python3.4/site-packages/matplotlib/testing/image_util.py
11
3765
# This module contains some functionality from the Python Imaging # Library, that has been ported to use Numpy arrays rather than PIL # Image objects. # The Python Imaging Library is # Copyright (c) 1997-2009 by Secret Labs AB # Copyright (c) 1995-2009 by Fredrik Lundh # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # Permission to use, copy, modify, and distribute this software and its # associated documentation for any purpose and without fee is hereby # granted, provided that the above copyright notice appears in all # copies, and that both that copyright notice and this permission notice # appear in supporting documentation, and that the name of Secret Labs # AB or the author not be used in advertising or publicity pertaining to # distribution of the software without specific, written prior # permission. # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO # THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR # ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. from __future__ import (absolute_import, division, print_function, unicode_literals) import six from six.moves import xrange import numpy as np from matplotlib.cbook import deprecated, warn_deprecated warn_deprecated('1.4.0', name='matplotlib.testing.image_util', obj_type='module') @deprecated('1.4.0') def autocontrast(image, cutoff=0): """ Maximize image contrast, based on histogram. This completely ignores the alpha channel. """ assert image.dtype == np.uint8 output_image = np.empty((image.shape[0], image.shape[1], 3), np.uint8) for i in xrange(0, 3): plane = image[:,:,i] output_plane = output_image[:,:,i] h = np.histogram(plane, bins=256)[0] if cutoff: # cut off pixels from both ends of the histogram # get number of pixels n = 0 for ix in xrange(256): n = n + h[ix] # remove cutoff% pixels from the low end cut = n * cutoff / 100 for lo in range(256): if cut > h[lo]: cut = cut - h[lo] h[lo] = 0 else: h[lo] = h[lo] - cut cut = 0 if cut <= 0: break # remove cutoff% samples from the hi end cut = n * cutoff / 100 for hi in xrange(255, -1, -1): if cut > h[hi]: cut = cut - h[hi] h[hi] = 0 else: h[hi] = h[hi] - cut cut = 0 if cut <= 0: break # find lowest/highest samples after preprocessing for lo in xrange(256): if h[lo]: break for hi in xrange(255, -1, -1): if h[hi]: break if hi <= lo: output_plane[:,:] = plane else: scale = 255.0 / (hi - lo) offset = -lo * scale lut = np.arange(256, dtype=np.float) lut *= scale lut += offset lut = lut.clip(0, 255) lut = lut.astype(np.uint8) output_plane[:,:] = lut[plane] return output_image
gpl-2.0
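The autocontrast helper above stretches an image histogram, optionally clipping a percentage of pixels at each end before rescaling. A simplified sketch of the same idea without the cutoff handling (pure NumPy, not the deprecated matplotlib helper itself):

import numpy as np

rng = np.random.RandomState(0)
image = rng.randint(100, 151, size=(64, 64)).astype(np.uint8)  # low-contrast grayscale plane

# Contrast stretch in the spirit of autocontrast: map the darkest and brightest
# occupied grey levels linearly onto 0..255 via a lookup table.
lo, hi = image.min(), image.max()
scale = 255.0 / (hi - lo)
lut = np.clip((np.arange(256) - lo) * scale, 0, 255).astype(np.uint8)
stretched = lut[image]
print(image.min(), image.max(), '->', stretched.min(), stretched.max())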
vene/ambra
ambra/cross_validation.py
1
9371
import numbers import time import numpy as np from sklearn.utils import safe_indexing from sklearn.base import is_classifier, clone from sklearn.metrics.scorer import check_scoring from sklearn.externals.joblib import Parallel, delayed, logger from ambra.backports import _num_samples, indexable from sklearn.cross_validation import check_cv def _safe_split(estimator, X, y, indices, train_indices=None): """Create subset of dataset and properly handle kernels.""" if hasattr(estimator, 'kernel') and callable(estimator.kernel): # cannot compute the kernel values with custom function raise ValueError("Cannot use a custom kernel function. " "Precompute the kernel matrix instead.") if not hasattr(X, "shape"): if getattr(estimator, "_pairwise", False): raise ValueError("Precomputed kernels or affinity matrices have " "to be passed as arrays or sparse matrices.") X_subset = [X[idx] for idx in indices] else: if getattr(estimator, "_pairwise", False): # X is a precomputed square kernel matrix if X.shape[0] != X.shape[1]: raise ValueError("X should be a square kernel matrix") if train_indices is None: X_subset = X[np.ix_(indices, indices)] else: X_subset = X[np.ix_(indices, train_indices)] else: X_subset = safe_indexing(X, indices) if y is not None: y_subset = safe_indexing(y, indices) else: y_subset = None return X_subset, y_subset def _score(estimator, X_test, y_test, scorer, **params): """Compute the score of an estimator on a given test set.""" if y_test is None: score = scorer(estimator, X_test, **params) else: score = scorer(estimator, X_test, y_test, **params) if not isinstance(score, numbers.Number): raise ValueError("scoring must return a number, got %s (%s) instead." % (str(score), type(score))) return score def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs', scorer_params=None): """Evaluate a score by cross-validation Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like The data to fit. Can be, for example a list, or an array at least 2d. y : array-like, optional, default: None The target variable to try to predict in the case of supervised learning. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : cross-validation generator or int, optional, default: None A cross-validation generator to use. If int, determines the number of folds in StratifiedKFold if y is binary or multiclass and estimator is a classifier, or the number of folds in KFold otherwise. If None, it is equivalent to cv=3. n_jobs : integer, optional The number of CPUs to use to do the computation. -1 means 'all CPUs'. verbose : integer, optional The verbosity level. fit_params : dict, optional Parameters to pass to the fit method of the estimator. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. 
Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' scorer_params : dict, optional Parameters to pass to the scorer. Can be used for sample weights and sample groups. Returns ------- scores : array of float, shape=(len(list(cv)),) Array of scores of the estimator for each run of the cross validation. """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) # We clone the estimator to make sure that all the folds are # independent, and that it is pickle-able. parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch) scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer, train, test, verbose, None, fit_params, scorer_params) for train, test in cv) return np.array(scores)[:, 0] def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters, fit_params, scorer_params, return_train_score=False, return_parameters=False): """Fit estimator and compute scores for a given dataset split. Parameters ---------- estimator : estimator object implementing 'fit' The object to use to fit the data. X : array-like of shape at least 2D The data to fit. y : array-like or None The target variable to try to predict in the case of supervised learning. scoring : callable A scorer callable object / function with signature ``scorer(estimator, X, y)``. train : array-like, shape = (n_train_samples,) Indices of training samples. test : array-like, shape = (n_test_samples,) Indices of test samples. verbose : integer The verbosity level. parameters : dict or None Parameters to be set on the estimator. fit_params : dict or None Parameters that will be passed to ``estimator.fit``. scorer_params : dict or None Parameters that will be passed to the scorer. return_train_score : boolean, optional, default: False Compute and return score on training set. return_parameters : boolean, optional, default: False Return parameters that has been used for the estimator. Returns ------- train_score : float, optional Score on training set, returned only if `return_train_score` is `True`. test_score : float Score on test set. n_test_samples : int Number of test samples. scoring_time : float Time spent for fitting and scoring in seconds. parameters : dict or None, optional The parameters that have been evaluated. 
""" if verbose > 1: if parameters is None: msg = "no parameters to be set" else: msg = '%s' % (', '.join('%s=%s' % (k, v) for k, v in parameters.items())) print("[CV] %s %s" % (msg, (64 - len(msg)) * '.')) # Adjust lenght of sample weights n_samples = _num_samples(X) fit_params = fit_params if fit_params is not None else {} fit_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in fit_params.items()]) # Same, but take both slices scorer_params = scorer_params if scorer_params is not None else {} train_scorer_params = dict([(k, np.asarray(v)[train] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) test_scorer_params = dict([(k, np.asarray(v)[test] if hasattr(v, '__len__') and len(v) == n_samples else v) for k, v in scorer_params.items()]) if parameters is not None: estimator.set_params(**parameters) start_time = time.time() X_train, y_train = _safe_split(estimator, X, y, train) X_test, y_test = _safe_split(estimator, X, y, test, train) if y_train is None: estimator.fit(X_train, **fit_params) else: estimator.fit(X_train, y_train, **fit_params) test_score = _score(estimator, X_test, y_test, scorer, **test_scorer_params) if return_train_score: train_score = _score(estimator, X_train, y_train, scorer, **train_scorer_params) scoring_time = time.time() - start_time if verbose > 2: msg += ", score=%f" % test_score if verbose > 1: end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time)) print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg)) ret = [train_score] if return_train_score else [] ret.extend([test_score, _num_samples(X_test), scoring_time]) if return_parameters: ret.append(parameters) return ret
bsd-2-clause
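A hedged usage sketch for the cross_val_score variant above; the import path is assumed from the repository layout, and it requires the older scikit-learn releases this module targets (sklearn.cross_validation, sklearn.externals.joblib):

import numpy as np
from sklearn.linear_model import LogisticRegression
from ambra.cross_validation import cross_val_score  # import path assumed from the repo layout

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
y = (X[:, 0] > 0).astype(int)

# fit_params and scorer_params entries with one value per sample are sliced
# per fold inside _fit_and_score, mirroring the train/test split of X and y.
scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=5)
print(scores.mean())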
hstau/manifold-cryo
fit_1D_open_manifold_3D.py
1
5015
import numpy as np import get_fit_1D_open_manifold_3D_param import solve_d_R_d_tau_p_3D import a from scipy.io import loadmat import matplotlib.pyplot as plt #import matplotlib.pyplot as plt ''' function [a,b,tau] = fit_1D_open_manifold_3D(psi) % % fit_1D_open_manifold_3D % % fit the eigenvectors for a 1D open manifold to the model % x_ij = a_j cos(j*pi*tau_i) + b_j. % % j goes from 1 to 3 (this is only for 3D systems). % % i goes from 1 to nS where nS is the number of data points to be fitted. % % For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are % obtained by putting dR/d(tau_i) to zero. % % For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are % obtained by solving 3 sets of 2x2 linear equations. % % Fit parameters and initial set of {\tau} are specified in % % get_fit_1D_open_manifold_3D_param.m % % copyright (c) Russell Fung 2014 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% global p nDim a b x x_fit ''' ''' def plot_fitted_curve(hFig): global x x_fit h = plt.figure(hFig) hsp = plt.subplot(2,2,1) plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1); hold on plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1); hold off set(hsp,'lineWidth',2,'fontSize',15); hsp = subplot(2,2,2); plotRF(hsp,x(:,1),x(:,2),'','','','b.'); addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.'); hsp = subplot(2,2,3); plotRF(hsp,x(:,1),x(:,3),'','','','b.'); addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.'); hsp = subplot(2,2,4); plotRF(hsp,x(:,2),x(:,3),'','','','b.'); addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.'); drawnow %end ''' eps = 1e-4 #global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result def op(psi): a.init() #global p, nDim, a, b, x, x_fit a.nDim = 3 #tau = get_fit_1D_open_manifold_3D_param tau = get_fit_1D_open_manifold_3D_param.op(psi) aux = np.zeros((tau.shape[0],5)) #added nS = a.x.shape[0] for iter in xrange(1,a.maxIter+1): string ='iteration ' + str(iter) print string ''' #%%%%%%%%%%%%%%%%%%%%% #% solve for a and b % #%%%%%%%%%%%%%%%%%%%%% ''' a_old = a.a b_old = a.b j_pi_tau = np.dot(tau,np.pi*np.array([[1,2,3]])) cos_j_pi_tau = np.cos(j_pi_tau) A11 = np.sum(cos_j_pi_tau**2, axis=0) A12 = np.sum(cos_j_pi_tau, axis=0) A21 = A12 A22 = nS x_cos_j_pi_tau = a.x*cos_j_pi_tau b1 = np.sum(x_cos_j_pi_tau, axis=0) b2 = np.sum(a.x, axis=0) coeff = np.zeros((2,3)) for qq in xrange(3): A = np.array([[A11[qq],A12[qq]],[A21[qq], A22]]) b = np.array([b1[qq], b2[qq]]) coeff[:,qq] = np.linalg.solve(A,b) a.a = coeff[0,:] a.b = coeff[1,:] ''' %%%%%%%%%%%%%%%%%%%%%%%%% #% plot the fitted curve % %%%%%%%%%%%%%%%%%%%%%%%%% ''' j_pi_tau = np.dot(np.linspace(0,1,1000).reshape(-1,1),np.array([[1,2,3]]))*np.pi cos_j_pi_tau = np.cos(j_pi_tau) tmp = a.a*cos_j_pi_tau a.x_fit = tmp + a.b #%plot_fitted_curve(iter) ''' %%%%%%%%%%%%%%%%% #% solve for tau % %%%%%%%%%%%%%%%%% ''' tau_old = tau for a.p in xrange(nS): tau[a.p],beta = solve_d_R_d_tau_p_3D.op() #added for kk in xrange(beta.shape[0]): aux[a.p,kk] = beta[kk] ''' if iter == 0: data = loadmat('aux0.mat') # (this is for < v7.3 elif iter == 1: data = loadmat('aux1.mat') # (this is for < v7.3 else: data = loadmat('aux2.mat') # (this is for < v7.3 imaux = data['aux'] plt.subplot(2, 2, 1) plt.imshow(aux, cmap=plt.get_cmap('gray'),aspect=0.1) plt.title('aux') plt.subplot(2, 2, 2) plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1) plt.title('imaux') plt.show() ''' ''' %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% #% calculate the changes in fitting parameters % #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ''' delta_a = 
np.fabs(a.a-a_old)/(np.fabs(a.a)+eps) delta_b = np.fabs(a.b-b_old)/(np.fabs(a.b)+eps) delta_tau = np.fabs(tau-tau_old) delta_a = max(delta_a)*100 delta_b = max(delta_b)*100 delta_tau = max(delta_tau) print ' changes in fitting parameters: \n' string = ' amplitudes: '+ str(delta_a) + '\n' + \ ' offsets: ' + str(delta_b) + ' \n' +\ ' values of tau: ' + str(delta_tau) + ' \n' print string if (delta_a<a.delta_a_max) and (delta_b < a.delta_b_max) and (delta_tau < a.delta_tau_max): break return (a.a,a.b,tau)
gpl-2.0
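The inner loop above fits x_ij = a_j*cos(j*pi*tau_i) + b_j one coordinate at a time by solving a 2x2 normal-equation system for fixed tau. A self-contained sketch of that single step on synthetic data:

import numpy as np

rng = np.random.RandomState(0)
nS = 200
tau = np.sort(rng.rand(nS)).reshape(-1, 1)

# Synthetic data generated from the model for j = 1, 2, 3, plus a little noise.
a_true, b_true = np.array([1.0, 0.5, 0.25]), np.array([0.1, -0.2, 0.3])
cos_j_pi_tau = np.cos(tau * np.pi * np.array([[1, 2, 3]]))
x = a_true * cos_j_pi_tau + b_true + 0.01 * rng.randn(nS, 3)

# For fixed tau, each column j gives a 2x2 linear system in (a_j, b_j).
coeff = np.zeros((2, 3))
for j in range(3):
    c = cos_j_pi_tau[:, j]
    A = np.array([[np.sum(c ** 2), np.sum(c)],
                  [np.sum(c), nS]])
    b = np.array([np.sum(x[:, j] * c), np.sum(x[:, j])])
    coeff[:, j] = np.linalg.solve(A, b)
print(coeff[0], coeff[1])  # recovered a_j and b_j, close to a_true and b_true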
apdjustino/DRCOG_Urbansim
src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py
1
10890
# Opus/UrbanSim urban simulation software. # Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington # See opus_core/LICENSE import os, re, sys, time, traceback from copy import copy from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization from opus_core.logger import logger from numpy import array, arange from numpy import ones, zeros, hstack, vstack from numpy import trapz, trim_zeros from pylab import subplot, plot, show from pylab import xlabel, ylabel, title, text from pylab import MultipleLocator, FormatStrFormatter from pylab import savefig, clf, close class LorenzCurve(Visualization): def __init__(self, source_data, dataset_name, attribute = None, years = None, operation = None, name = None, scale = None, storage_location = None): Visualizer.__init__(self, source_data, dataset_name, [attribute], years, operation, name, storage_location) self._values = None self._ginicoeff = None def is_single_year_indicator_image_type(self): return True def get_file_extension(self): return 'png' def get_visualization_shorthand(self): return 'lorenzcurve' def get_additional_metadata(self): return {} def _create_indicator(self, year): """Create a Lorenz Curve for the given indicator, save it to the cache directory's 'indicators' sub-directory. """ attribute_short = self.get_attribute_alias(attribute = self.attributes[0], year = year) title = attribute_short + ' ' + str(year) if self.run_description is not None: title += '\n' + self.run_description # Do calculation # Make fresh copy with dtype float64 to avoid overflows self._values = array(self._get_indicator(year, wrap = False).astype('float64')) self._compute_lorenz() file_path = self.get_file_path(year = year) self._plot(attribute_short, file_path ); return file_path def _compute_lorenz(self ): ''' Do the lorenz curve computation and save the result in the corresponding class variables ''' self._values.sort() #remove 0 values from array self._values = trim_zeros(self._values,'f') num_values = self._values.size F = arange(1, num_values + 1, 1, "float64")/num_values L = self._values.cumsum(dtype="float64")/sum(self._values) # Add (0, 0) as the first point for completeness (e.g. 
plotting) origin = array([[0], [0]]) self._values = vstack((F, L)) self._values = hstack((origin, self._values)) # This is the simple form of (0.5 - integral) / 0.5 self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0]) def _plot(self, attribute_name, file_path=None ): clf() # Clear existing plot a = self._values[0] * 100 b = self._values[1] * 100 ax = subplot(111) plot(a, a, 'k--', a, b, 'r') ax.set_ylim([0,100]) ax.grid(color='0.5', linestyle=':', linewidth=0.5) xlabel('population') ylabel(attribute_name) title('Lorenz curve') font = {'fontname' : 'Courier', 'color' : 'r', 'fontweight' : 'bold', 'fontsize' : 11 } box = { 'pad' : 6, 'facecolor' : 'w', 'linewidth' : 1, 'fill' : True } text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box ) majorLocator = MultipleLocator(20) majorFormatter = FormatStrFormatter('%d %%') minorLocator = MultipleLocator(5) ax.xaxis.set_major_locator( majorLocator ) ax.xaxis.set_major_formatter( majorFormatter) ax.xaxis.set_minor_locator( minorLocator ) ax.yaxis.set_major_locator( majorLocator ) ax.yaxis.set_major_formatter( majorFormatter) ax.yaxis.set_minor_locator( minorLocator ) if file_path: savefig(file_path) close() else: show() import os from opus_core.tests import opus_unittest from numpy import allclose from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest class Tests(AbstractIndicatorTest): def skip_test_create_indicator(self): indicator_path = os.path.join(self.temp_cache_path, 'indicators') self.assert_(not os.path.exists(indicator_path)) lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) lorenzcurve.create(False) self.assert_(os.path.exists(indicator_path)) self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png'))) def skip_test_perfect_equality(self): """Perfect equality is when everybody has the same amount of something""" lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) incomes = ones(100) lorenzcurve._values = incomes lorenzcurve._compute_lorenz() wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.)) self.assert_(allclose(lorenzcurve._values, wanted_result)) def skip_test_perfect_inequality(self): """Perfect inequality is when one person has all of something""" lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) incomes = zeros(100) incomes[0] = 42 lorenzcurve._values = incomes lorenzcurve._compute_lorenz() #We strip all the zero values, so the result consists of only two values wanted_result = [[0.,1.],[0.,1.]] self.assert_(allclose(lorenzcurve._values, wanted_result)) def skip_test_small_lorenz(self): """Test case for less than 100 people""" lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) incomes = array([1, 1, 2, 3, 4, 5]) lorenzcurve._values = incomes lorenzcurve._compute_lorenz() wanted_result = array( [[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ], [ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. 
]]) self.assert_(allclose(lorenzcurve._values, wanted_result)) def skip_test_small_gini(self): """Test case for gini coefficient for the small case""" lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) incomes = array([1, 1, 2, 3, 4, 5]) lorenzcurve._values = incomes lorenzcurve._compute_lorenz() self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125) def skip_test_large_lorenz(self): """Test case for more than 100 people""" lorenzcurve = LorenzCurve( source_data = self.source_data, attribute = 'opus_core.test.attribute', dataset_name = 'test', years = None ) incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188, 498, 293, 935, 177, 160, 380, 538, 783, 256, 280, 731, 362, 870, 970, 674, 211, 524, 207, 513, 461, 280, 275, 410, 282, 144, 682, 573, 252, 382, 909, 719, 666, 236, 636, 628, 542, 630, 484, 629, 974, 747, 509, 281, 725, 377, 565, 495, 840, 391, 191, 929, 679, 217, 179, 336, 562, 293, 881, 271, 172, 426, 697, 293, 576, 203, 390, 522, 948, 312, 491, 531, 959, 646, 495, 306, 631, 722, 322, 876, 586, 316, 124, 796, 250, 456, 112, 661, 294, 749, 619, 134, 582, 996, 413, 421, 219, 796, 923, 832, 557]) lorenzcurve._values = incomes lorenzcurve._compute_lorenz() wanted_result_F = arange(0, 111) / 110. wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928, 0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138, 0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 , 0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362, 0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241, 0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 , 0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928, 0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615, 0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748, 0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678, 0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536, 0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169, 0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685, 0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784, 0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678, 0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293, 0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192, 0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428, 0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 , 0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574, 0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337, 0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ]) self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L)))) if __name__ == '__main__': try: import matplotlib except: print 'could not import matplotlib' else: opus_unittest.main()
agpl-3.0
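The _compute_lorenz method above reduces to a few NumPy operations. A standalone sketch of the same calculation, including the Gini coefficient checked by skip_test_small_gini:

import numpy as np

incomes = np.array([1, 1, 2, 3, 4, 5], dtype='float64')
values = np.trim_zeros(np.sort(incomes), 'f')            # sort and drop zero incomes
n = values.size
F = np.arange(1, n + 1, dtype='float64') / n             # cumulative population share
L = values.cumsum(dtype='float64') / values.sum()        # cumulative income share
F, L = np.hstack(([0.0], F)), np.hstack(([0.0], L))      # prepend the (0, 0) point
gini = 1 - 2 * np.trapz(L, F)                            # (0.5 - area under curve) / 0.5
print(gini)  # ~0.3125 for this toy income vector, matching the test above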
duncanmmacleod/gwpy
gwpy/plot/axes.py
1
21895
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2018-2020) # # This file is part of GWpy. # # GWpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """Extension of `~matplotlib.axes.Axes` for gwpy """ import warnings from functools import wraps from math import log from numbers import Number import numpy from astropy.time import Time from matplotlib import rcParams from matplotlib.artist import allow_rasterization from matplotlib.axes import Axes as _Axes from matplotlib.axes._base import _process_plot_var_args from matplotlib.collections import PolyCollection from matplotlib.lines import Line2D from matplotlib.projections import register_projection from . import (Plot, colorbar as gcbar) from .colors import format_norm from .gps import GPS_SCALES from .legend import HandlerLine2D from ..time import to_gps __author__ = 'Duncan Macleod <duncan.macleod@ligo.org>' def log_norm(func): """Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring """ @wraps(func) def decorated_func(*args, **kwargs): norm, kwargs = format_norm(kwargs) kwargs['norm'] = norm return func(*args, **kwargs) return decorated_func def xlim_as_gps(func): """Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps` """ @wraps(func) def wrapped_func(self, left=None, right=None, **kw): if right is None and numpy.iterable(left): left, right = left kw['left'] = left kw['right'] = right gpsscale = self.get_xscale() in GPS_SCALES for key in ('left', 'right'): if gpsscale: try: kw[key] = numpy.longdouble(str(to_gps(kw[key]))) except TypeError: pass return func(self, **kw) return wrapped_func def restore_grid(func): """Wrap ``func`` to preserve the Axes current grid settings. 
""" @wraps(func) def wrapped_func(self, *args, **kwargs): try: grid = ( self.xaxis._minor_tick_kw["gridOn"], self.xaxis._major_tick_kw["gridOn"], self.yaxis._minor_tick_kw["gridOn"], self.yaxis._major_tick_kw["gridOn"], ) except KeyError: # matplotlib < 3.3.3 grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor, self.yaxis._gridOnMinor, self.yaxis._gridOnMajor) try: return func(self, *args, **kwargs) finally: # reset grid self.xaxis.grid(grid[0], which="minor") self.xaxis.grid(grid[1], which="major") self.yaxis.grid(grid[2], which="minor") self.yaxis.grid(grid[3], which="major") return wrapped_func # -- new Axes ----------------------------------------------------------------- class Axes(_Axes): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # handle Series in `ax.plot()` self._get_lines = PlotArgsProcessor(self) # reset data formatters (for interactive plots) to support # GPS time display self.fmt_xdata = self._fmt_xdata self.fmt_ydata = self._fmt_ydata @allow_rasterization def draw(self, *args, **kwargs): labels = {} for ax in (self.xaxis, self.yaxis): if ax.get_scale() in GPS_SCALES and ax.isDefault_label: labels[ax] = ax.get_label_text() trans = ax.get_transform() epoch = float(trans.get_epoch()) unit = trans.get_unit_name() iso = Time(epoch, format='gps', scale='utc').iso utc = iso.rstrip('0').rstrip('.') ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format( unit, utc, epoch)) try: super().draw(*args, **kwargs) finally: for ax in labels: # reset labels ax.isDefault_label = True # -- auto-gps helpers ----------------------- def _fmt_xdata(self, x): if self.get_xscale() in GPS_SCALES: return str(to_gps(x)) return self.xaxis.get_major_formatter().format_data_short(x) def _fmt_ydata(self, y): if self.get_yscale() in GPS_SCALES: return str(to_gps(y)) return self.yaxis.get_major_formatter().format_data_short(y) set_xlim = xlim_as_gps(_Axes.set_xlim) def set_epoch(self, epoch): """Set the epoch for the current GPS scale. This method will fail if the current X-axis scale isn't one of the GPS scales. See :ref:`gwpy-plot-gps` for more details. Parameters ---------- epoch : `float`, `str` GPS-compatible time or date object, anything parseable by :func:`~gwpy.time.to_gps` is fine. """ scale = self.get_xscale() return self.set_xscale(scale, epoch=epoch) def get_epoch(self): """Return the epoch for the current GPS scale/ This method will fail if the current X-axis scale isn't one of the GPS scales. See :ref:`gwpy-plot-gps` for more details. """ return self.get_xaxis().get_transform().get_epoch() # -- overloaded plotting methods ------------ def scatter(self, x, y, c=None, **kwargs): # scatter with auto-sorting by colour try: if c is None: raise ValueError c_array = numpy.asanyarray(c, dtype=float) except ValueError: # no colour array pass else: c_sort = kwargs.pop('c_sort', True) if c_sort: sortidx = c_array.argsort() x = numpy.asarray(x)[sortidx] y = numpy.asarray(y)[sortidx] c = numpy.asarray(c)[sortidx] return super().scatter(x, y, c=c, **kwargs) scatter.__doc__ = _Axes.scatter.__doc__.replace( 'marker :', 'c_sort : `bool`, optional, default: True\n' ' Sort scatter points by `c` array value, if given.\n\n' 'marker :', ) @log_norm def imshow(self, array, *args, **kwargs): """Display an image, i.e. data on a 2D regular raster. If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a :class:`~gwpy.spectrogram.Spectrogram`), then the defaults are _different_ to those in the upstream :meth:`~matplotlib.axes.Axes.imshow` method. 
Namely, the defaults are - ``origin='lower'`` (coordinates start in lower-left corner) - ``aspect='auto'`` (pixels are not forced to be square) - ``interpolation='none'`` (no image interpolation is used) In all other usage, the defaults from the upstream matplotlib method are unchanged. Parameters ---------- array : array-like or PIL image The image data. *args, **kwargs All arguments and keywords are passed to the inherited :meth:`~matplotlib.axes.Axes.imshow` method. See also -------- matplotlib.axes.Axes.imshow for details of the image rendering """ if hasattr(array, "yspan"): # Array2D return self._imshow_array2d(array, *args, **kwargs) image = super().imshow(array, *args, **kwargs) self.autoscale(enable=None, axis='both', tight=None) return image def _imshow_array2d(self, array, origin='lower', interpolation='none', aspect='auto', **kwargs): """Render an `~gwpy.types.Array2D` using `Axes.imshow` """ # NOTE: If you change the defaults for this method, please update # the docstring for `imshow` above. # calculate extent extent = tuple(array.xspan) + tuple(array.yspan) if self.get_xscale() == 'log' and extent[0] == 0.: extent = (1e-300,) + extent[1:] if self.get_yscale() == 'log' and extent[2] == 0.: extent = extent[:2] + (1e-300,) + extent[3:] kwargs.setdefault('extent', extent) return self.imshow(array.value.T, origin=origin, aspect=aspect, interpolation=interpolation, **kwargs) @restore_grid @log_norm def pcolormesh(self, *args, **kwargs): """Create a pseudocolor plot with a non-regular rectangular grid. When using GWpy, this method can be called with a single argument that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y`` coordinate arrays will be determined from the indexing. In all other usage, all ``args`` and ``kwargs`` are passed directly to :meth:`~matplotlib.axes.Axes.pcolormesh`. Notes ----- Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`, this method respects the current grid settings. 
See also -------- matplotlib.axes.Axes.pcolormesh """ if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D return self._pcolormesh_array2d(*args, **kwargs) return super().pcolormesh(*args, **kwargs) def _pcolormesh_array2d(self, array, *args, **kwargs): """Render an `~gwpy.types.Array2D` using `Axes.pcolormesh` """ x = numpy.concatenate((array.xindex.value, array.xspan[-1:])) y = numpy.concatenate((array.yindex.value, array.yspan[-1:])) xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True) return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs) def hist(self, x, *args, **kwargs): x = numpy.asarray(x) # re-format weights as array if given as float weights = kwargs.get('weights', None) if isinstance(weights, Number): kwargs['weights'] = numpy.ones_like(x) * weights # calculate log-spaced bins on-the-fly if (kwargs.pop('logbins', False) and not numpy.iterable(kwargs.get('bins', None))): nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30) # get range hrange = kwargs.pop('range', None) if hrange is None: try: hrange = numpy.min(x), numpy.max(x) except ValueError as exc: if str(exc).startswith('zero-size array'): # no data exc.args = ('cannot generate log-spaced histogram ' 'bins for zero-size array, ' 'please pass `bins` or `range` manually',) raise # log-scale the axis and extract the base if kwargs.get('orientation') == 'horizontal': self.set_yscale('log', nonposy='clip') logbase = self.yaxis._scale.base else: self.set_xscale('log', nonposx='clip') logbase = self.xaxis._scale.base # generate the bins kwargs['bins'] = numpy.logspace( log(hrange[0], logbase), log(hrange[1], logbase), nbins+1, endpoint=True) return super().hist(x, *args, **kwargs) hist.__doc__ = _Axes.hist.__doc__.replace( 'color :', 'logbins : boolean, optional\n' ' If ``True``, use logarithmically-spaced histogram bins.\n\n' ' Default is ``False``\n\n' 'color :') # -- new plotting methods ------------------- def plot_mmm(self, data, lower=None, upper=None, **kwargs): """Plot a `Series` as a line, with a shaded region around it. The ``data`` `Series` is drawn, while the ``lower`` and ``upper`` `Series` are plotted lightly below and above, with a fill between them and the ``data``. All three `Series` should have the same `~Series.index` array. Parameters ---------- data : `~gwpy.types.Series` Data to plot normally. lower : `~gwpy.types.Series` Lower boundary (on Y-axis) for shade. upper : `~gwpy.types.Series` Upper boundary (on Y-axis) for shade. **kwargs Any other keyword arguments acceptable for :meth:`~matplotlib.Axes.plot`. 
Returns ------- artists : `tuple` All of the drawn artists: - `~matplotlib.lines.Line2d` for ``data``, - `~matplotlib.lines.Line2D` for ``lower``, if given - `~matplotlib.lines.Line2D` for ``upper``, if given - `~matplitlib.collections.PolyCollection` for shading See also -------- matplotlib.axes.Axes.plot for a full description of acceptable ``*args`` and ``**kwargs`` """ alpha = kwargs.pop('alpha', .1) # plot mean line, = self.plot(data, **kwargs) out = [line] # modify keywords for shading kwargs.update({ 'label': '', 'linewidth': line.get_linewidth() / 2, 'color': line.get_color(), 'alpha': alpha * 2, }) # plot lower and upper Series fill = [data.xindex.value, data.value, data.value] for i, bound in enumerate((lower, upper)): if bound is not None: out.extend(self.plot(bound, **kwargs)) fill[i+1] = bound.value # fill between out.append(self.fill_between( *fill, alpha=alpha, color=kwargs['color'], rasterized=kwargs.get('rasterized', True))) return out def tile(self, x, y, w, h, color=None, anchor='center', edgecolors='face', linewidth=0.8, **kwargs): """Plot rectanguler tiles based onto these `Axes`. ``x`` and ``y`` give the anchor point for each tile, with ``w`` and ``h`` giving the extent in the X and Y axis respectively. Parameters ---------- x, y, w, h : `array_like`, shape (n, ) Input data color : `array_like`, shape (n, ) Array of amplitudes for tile color anchor : `str`, optional Anchor point for tiles relative to ``(x, y)`` coordinates, one of - ``'center'`` - center tile on ``(x, y)`` - ``'ll'`` - ``(x, y)`` defines lower-left corner of tile - ``'lr'`` - ``(x, y)`` defines lower-right corner of tile - ``'ul'`` - ``(x, y)`` defines upper-left corner of tile - ``'ur'`` - ``(x, y)`` defines upper-right corner of tile **kwargs Other keywords are passed to :meth:`~matplotlib.collections.PolyCollection` Returns ------- collection : `~matplotlib.collections.PolyCollection` the collection of tiles drawn Examples -------- >>> import numpy >>> from matplotlib import pyplot >>> import gwpy.plot # to get gwpy's Axes >>> x = numpy.arange(10) >>> y = numpy.arange(x.size) >>> w = numpy.ones_like(x) * .8 >>> h = numpy.ones_like(x) * .8 >>> fig = pyplot.figure() >>> ax = fig.gca() >>> ax.tile(x, y, w, h, anchor='ll') >>> pyplot.show() """ # get color and sort if color is not None and kwargs.get('c_sort', True): sortidx = color.argsort() x = x[sortidx] y = y[sortidx] w = w[sortidx] h = h[sortidx] color = color[sortidx] # define how to make a polygon for each tile if anchor == 'll': def _poly(x, y, w, h): return ((x, y), (x, y+h), (x+w, y+h), (x+w, y)) elif anchor == 'lr': def _poly(x, y, w, h): return ((x-w, y), (x-w, y+h), (x, y+h), (x, y)) elif anchor == 'ul': def _poly(x, y, w, h): return ((x, y-h), (x, y), (x+w, y), (x+w, y-h)) elif anchor == 'ur': def _poly(x, y, w, h): return ((x-w, y-h), (x-w, y), (x, y), (x, y-h)) elif anchor == 'center': def _poly(x, y, w, h): return ((x-w/2., y-h/2.), (x-w/2., y+h/2.), (x+w/2., y+h/2.), (x+w/2., y-h/2.)) else: raise ValueError("Unrecognised tile anchor {!r}".format(anchor)) # build collection cmap = kwargs.pop('cmap', rcParams['image.cmap']) coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)), edgecolors=edgecolors, linewidth=linewidth, **kwargs) if color is not None: coll.set_array(color) coll.set_cmap(cmap) out = self.add_collection(coll) self.autoscale_view() return out # -- overloaded auxiliary methods ----------- def legend(self, *args, **kwargs): # handle deprecated keywords linewidth = kwargs.pop("linewidth", None) if linewidth: 
warnings.warn( "the linewidth keyword to gwpy.plot.Axes.legend has been " "deprecated and will be removed in a future release; " "please update your code to use a custom legend handler, " "e.g. gwpy.plot.legend.HandlerLine2D.", DeprecationWarning, ) alpha = kwargs.pop("alpha", None) if alpha: kwargs.setdefault("framealpha", alpha) warnings.warn( "the alpha keyword to gwpy.plot.Axes.legend has been " "deprecated and will be removed in a future release; " "use framealpha instead.", DeprecationWarning, ) # build custom handler handler_map = kwargs.setdefault("handler_map", dict()) if isinstance(handler_map, dict): handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6)) # create legend return super().legend(*args, **kwargs) legend.__doc__ = _Axes.legend.__doc__.replace( "Call signatures", """.. note:: This method uses a custom default legend handler for `~matplotlib.lines.Line2D` objects, with increased linewidth relative to the upstream :meth:`~matplotlib.axes.Axes.legend` method. To disable this, pass ``handler_map=None``, or create and pass your own handler class. See :ref:`gwpy-plot-legend` for more details. Call signatures""", ) def colorbar(self, mappable=None, **kwargs): """Add a `~matplotlib.colorbar.Colorbar` to these `Axes` Parameters ---------- mappable : matplotlib data collection, optional collection against which to map the colouring, default will be the last added mappable artist (collection or image) fraction : `float`, optional fraction of space to steal from these `Axes` to make space for the new axes, default is ``0.`` if ``use_axesgrid=True`` is given (default), otherwise default is ``.15`` to match the upstream matplotlib default. **kwargs other keyword arguments to be passed to the :meth:`Plot.colorbar` generator Returns ------- cbar : `~matplotlib.colorbar.Colorbar` the newly added `Colorbar` See also -------- Plot.colorbar """ fig = self.get_figure() if kwargs.get('use_axesgrid', True): kwargs.setdefault('fraction', 0.) if kwargs.get('fraction', 0.) == 0.: kwargs.setdefault('use_axesgrid', True) mappable, kwargs = gcbar.process_colorbar_kwargs( fig, mappable=mappable, ax=self, **kwargs) if isinstance(fig, Plot): # either we have created colorbar Axes using axesgrid1, or # the user already gave use_axesgrid=False, so we forcefully # disable axesgrid here in case fraction == 0., which causes # gridspec colorbars to fail. kwargs['use_axesgrid'] = False return fig.colorbar(mappable, **kwargs) # override default Axes with this one by registering a projection with the # same name register_projection(Axes) # -- overload Axes.plot() to handle Series ------------------------------------ class PlotArgsProcessor(_process_plot_var_args): """This class controls how ax.plot() works """ def __call__(self, *args, **kwargs): """Find `Series` data in `plot()` args and unwrap """ newargs = [] while args: # strip first argument this, args = args[:1], args[1:] # it its a 1-D Series, then parse it as (xindex, value) if hasattr(this[0], "xindex") and this[0].ndim == 1: this = (this[0].xindex.value, this[0].value) # otherwise treat as normal (must be a second argument) else: this += args[:1] args = args[1:] # allow colour specs if args and isinstance(args[0], str): this += args[0], args = args[1:] newargs.extend(this) return super().__call__(*newargs, **kwargs)
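# Hypothetical usage sketch (not part of the gwpy source above) for the plot_mmm
# method defined in this module, modelled on the tile() docstring example: importing
# gwpy.plot registers this Axes class as the default projection, so fig.gca()
# returns one of these Axes. The sine-wave TimeSeries is made-up demonstration data.
import numpy
from matplotlib import pyplot
from gwpy.timeseries import TimeSeries
import gwpy.plot  # noqa: F401  (registers the Axes class above)

data = TimeSeries(numpy.sin(numpy.linspace(0, 20, 2000)), dt=0.01)
fig = pyplot.figure()
ax = fig.gca()
ax.plot_mmm(data, lower=data - 0.2, upper=data + 0.2)
pyplot.show()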
gpl-3.0
jereze/scikit-learn
benchmarks/bench_rcv1_logreg_convergence.py
149
7173
# Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org> # Olivier Grisel <olivier.grisel@ensta.org> # # License: BSD 3 clause import matplotlib.pyplot as plt import numpy as np import gc import time from sklearn.externals.joblib import Memory from sklearn.linear_model import (LogisticRegression, SGDClassifier) from sklearn.datasets import fetch_rcv1 from sklearn.linear_model.sag import get_auto_step_size from sklearn.linear_model.sag_fast import get_max_squared_sum try: import lightning.classification as lightning_clf except ImportError: lightning_clf = None m = Memory(cachedir='.', verbose=0) # compute logistic loss def get_loss(w, intercept, myX, myy, C): n_samples = myX.shape[0] w = w.ravel() p = np.mean(np.log(1. + np.exp(-myy * (myX.dot(w) + intercept)))) print("%f + %f" % (p, w.dot(w) / 2. / C / n_samples)) p += w.dot(w) / 2. / C / n_samples return p # We use joblib to cache individual fits. Note that we do not pass the dataset # as argument as the hashing would be too slow, so we assume that the dataset # never changes. @m.cache() def bench_one(name, clf_type, clf_params, n_iter): clf = clf_type(**clf_params) try: clf.set_params(max_iter=n_iter, random_state=42) except: clf.set_params(n_iter=n_iter, random_state=42) st = time.time() clf.fit(X, y) end = time.time() try: C = 1.0 / clf.alpha / n_samples except: C = clf.C try: intercept = clf.intercept_ except: intercept = 0. train_loss = get_loss(clf.coef_, intercept, X, y, C) train_score = clf.score(X, y) test_score = clf.score(X_test, y_test) duration = end - st return train_loss, train_score, test_score, duration def bench(clfs): for (name, clf, iter_range, train_losses, train_scores, test_scores, durations) in clfs: print("training %s" % name) clf_type = type(clf) clf_params = clf.get_params() for n_iter in iter_range: gc.collect() train_loss, train_score, test_score, duration = bench_one( name, clf_type, clf_params, n_iter) train_losses.append(train_loss) train_scores.append(train_score) test_scores.append(test_score) durations.append(duration) print("classifier: %s" % name) print("train_loss: %.8f" % train_loss) print("train_score: %.8f" % train_score) print("test_score: %.8f" % test_score) print("time for fit: %.8f seconds" % duration) print("") print("") return clfs def plot_train_losses(clfs): plt.figure() for (name, _, _, train_losses, _, _, durations) in clfs: plt.plot(durations, train_losses, '-o', label=name) plt.legend(loc=0) plt.xlabel("seconds") plt.ylabel("train loss") def plot_train_scores(clfs): plt.figure() for (name, _, _, _, train_scores, _, durations) in clfs: plt.plot(durations, train_scores, '-o', label=name) plt.legend(loc=0) plt.xlabel("seconds") plt.ylabel("train score") plt.ylim((0.92, 0.96)) def plot_test_scores(clfs): plt.figure() for (name, _, _, _, _, test_scores, durations) in clfs: plt.plot(durations, test_scores, '-o', label=name) plt.legend(loc=0) plt.xlabel("seconds") plt.ylabel("test score") plt.ylim((0.92, 0.96)) def plot_dloss(clfs): plt.figure() pobj_final = [] for (name, _, _, train_losses, _, _, durations) in clfs: pobj_final.append(train_losses[-1]) indices = np.argsort(pobj_final) pobj_best = pobj_final[indices[0]] for (name, _, _, train_losses, _, _, durations) in clfs: log_pobj = np.log(abs(np.array(train_losses) - pobj_best)) / np.log(10) plt.plot(durations, log_pobj, '-o', label=name) plt.legend(loc=0) plt.xlabel("seconds") plt.ylabel("log(best - train_loss)") rcv1 = fetch_rcv1() X = rcv1.data n_samples, n_features = X.shape # consider the binary classification problem 'CCAT' vs the 
rest ccat_idx = rcv1.target_names.tolist().index('CCAT') y = rcv1.target.tocsc()[:, ccat_idx].toarray().ravel().astype(np.float64) y[y == 0] = -1 # parameters C = 1. fit_intercept = True tol = 1.0e-14 # max_iter range sgd_iter_range = list(range(1, 121, 10)) newton_iter_range = list(range(1, 25, 3)) lbfgs_iter_range = list(range(1, 242, 12)) liblinear_iter_range = list(range(1, 37, 3)) liblinear_dual_iter_range = list(range(1, 85, 6)) sag_iter_range = list(range(1, 37, 3)) clfs = [ ("LR-liblinear", LogisticRegression(C=C, tol=tol, solver="liblinear", fit_intercept=fit_intercept, intercept_scaling=1), liblinear_iter_range, [], [], [], []), ("LR-liblinear-dual", LogisticRegression(C=C, tol=tol, dual=True, solver="liblinear", fit_intercept=fit_intercept, intercept_scaling=1), liblinear_dual_iter_range, [], [], [], []), ("LR-SAG", LogisticRegression(C=C, tol=tol, solver="sag", fit_intercept=fit_intercept), sag_iter_range, [], [], [], []), ("LR-newton-cg", LogisticRegression(C=C, tol=tol, solver="newton-cg", fit_intercept=fit_intercept), newton_iter_range, [], [], [], []), ("LR-lbfgs", LogisticRegression(C=C, tol=tol, solver="lbfgs", fit_intercept=fit_intercept), lbfgs_iter_range, [], [], [], []), ("SGD", SGDClassifier(alpha=1.0 / C / n_samples, penalty='l2', loss='log', fit_intercept=fit_intercept, verbose=0), sgd_iter_range, [], [], [], [])] if lightning_clf is not None and not fit_intercept: alpha = 1. / C / n_samples # compute the same step_size than in LR-sag max_squared_sum = get_max_squared_sum(X) step_size = get_auto_step_size(max_squared_sum, alpha, "log", fit_intercept) clfs.append( ("Lightning-SVRG", lightning_clf.SVRGClassifier(alpha=alpha, eta=step_size, tol=tol, loss="log"), sag_iter_range, [], [], [], [])) clfs.append( ("Lightning-SAG", lightning_clf.SAGClassifier(alpha=alpha, eta=step_size, tol=tol, loss="log"), sag_iter_range, [], [], [], [])) # We keep only 200 features, to have a dense dataset, # and compare to lightning SAG, which seems incorrect in the sparse case. X_csc = X.tocsc() nnz_in_each_features = X_csc.indptr[1:] - X_csc.indptr[:-1] X = X_csc[:, np.argsort(nnz_in_each_features)[-200:]] X = X.toarray() print("dataset: %.3f MB" % (X.nbytes / 1e6)) # Split training and testing. Switch train and test subset compared to # LYRL2004 split, to have a larger training dataset. n = 23149 X_test = X[:n, :] y_test = y[:n] X = X[n:, :] y = y[n:] clfs = bench(clfs) plot_train_scores(clfs) plot_test_scores(clfs) plot_train_losses(clfs) plot_dloss(clfs) plt.show()
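# Small self-contained check (toy data, not the RCV1 benchmark itself) of the
# regularized logistic loss evaluated by get_loss above:
# mean(log(1 + exp(-y*(X.w + b)))) + w.w / (2*C*n_samples).
import numpy as np

X_toy = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
y_toy = np.array([1.0, -1.0, 1.0])
w_toy = np.array([0.5, -0.25])
b_toy, C_toy = 0.1, 1.0
margins = y_toy * (X_toy.dot(w_toy) + b_toy)
loss = np.mean(np.log1p(np.exp(-margins))) + w_toy.dot(w_toy) / (2.0 * C_toy * X_toy.shape[0])
print(loss)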
bsd-3-clause
GitYiheng/reinforcement_learning_test
test00_previous_files/mountaincar_q_learning.py
1
4304
import gym import os import sys import numpy as np import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from gym import wrappers from datetime import datetime from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import StandardScaler from sklearn.kernel_approximation import RBFSampler from sklearn.linear_model import SGDRegressor class FeatureTransformer: def __init__(self, env, n_components=500): observation_examples = np.array([env.observation_space.sample() for x in range(1000)]) scaler = StandardScaler() scaler.fit(observation_examples) featurizer = FeatureUnion([ ("rbf1", RBFSampler(gamma=5.0, n_components=n_components)), ("rbf2", RBFSampler(gamma=4.0, n_components=n_components)), ("rbf3", RBFSampler(gamma=3.0, n_components=n_components)), ("rbf4", RBFSampler(gamma=2.0, n_components=n_components)), ("rbf5", RBFSampler(gamma=1.0, n_components=n_components)), ("rbf6", RBFSampler(gamma=0.5, n_components=n_components)), ]) example_features = featurizer.fit_transform(scaler.transform(observation_examples)) self.dimensions = example_features.shape[1] self.scaler = scaler self.featurizer = featurizer def transform(self, observation): scaled = self.scaler.transform(observation) return self.featurizer.transform(scaled) class Model: def __init__(self, env, feature_transformer, learning_rate): self.env = env self.models = [] self.feature_transformer = feature_transformer for i in range(env.action_space.n): model = SGDRegressor(learning_rate) model.partial_fit(feature_transformer.transform([env.reset()]), [0]) self.models.append(model) def predict(self, s): X = self.feature_transformer.transform([s]) assert(len(X.shape) == 2) return np.array([m.predict(X)[0] for m in self.models]) def update(self, s, a, G): X = self.feature_transformer.transform([s]) assert(len(X.shape) == 2) self.models[a].partial_fit(X, [G]) def sample_action(self, s, eps): if np.random.random() < eps: return self.env.action_space.sample() else: return np.argmax(self.predict(s)) def play_one(model, eps, gamma): observation = env.reset() done = False totalreward = 0 iters = 0 while not done and iters < 1000: action = model.sample_action(observation, eps) prev_observation = observation observation, reward, done, info = env.step(action) # Update the model G = reward + gamma*np.max(model.predict(observation)[0]) model.update(prev_observation, action, G) totalreward += reward iters += 1 return totalreward def plot_cost_to_go(env, estimator, num_tiles=20): x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles) y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles) X, Y = np.meshgrid(x, y) Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y])) fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(111, projection='3d') surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0) ax.set_xlabel('Position') ax.set_ylabel('Velocity') ax.set_zlabel('Cost-To-Go == -V(s)') ax.set_title("Cost-To-Go Function") fig.colorbar(surf) plt.show() def plot_running_avg(totalrewards): N = len(totalrewards) running_avg = np.empty(N) for t in range(N): running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean() plt.plot(running_avg) plt.title("Running Average") plt.show() def main(): env = gym.make('MountainCar-v0') ft = FeatureTransformer(env) model = Model(env, ft, "constant") gamma = 0.99 if 'monitor' in sys.argv: filename = 
os.path.basename(__file__).split('.')[0] monitor_dir = './' + filename + '_' + str(datetime.now()) env = wrappers.Monitor(env, monitor_dir) N = 300 totalrewards = np.empty(N) for n in range(N): eps = 0.1*(0.97**n) totalreward = play_one(model, eps, gamma) totalrewards[n] = totalreward print("episode:", n, "total reward:", totalreward) print("avg reward for last 100 episodes:", totalrewards[-100:].mean()) print("total steps:", -totalrewards.sum()) plt.plot(totalrewards) plt.title("Rewards") plt.show() plot_running_avg(totalrewards) plot_cost_to_go(env, model) if __name__ == '__main__': main()
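# Tiny tabular illustration (separate from the RBF function-approximation code
# above) of the same Q-learning target used in play_one:
# G = r + gamma * max_a Q(s', a), followed by a step of size alpha toward G.
import numpy as np

Q = np.zeros((3, 2))              # 3 toy states, 2 actions
alpha, gamma = 0.1, 0.99
s, a, r, s_next = 0, 1, -1.0, 2
G = r + gamma * np.max(Q[s_next])
Q[s, a] += alpha * (G - Q[s, a])
print(Q[s, a])                    # -0.1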
mit
Unidata/MetPy
v0.9/_downloads/8591910a2b42dadcf3b05658ddd9c600/isentropic_example.py
2
7222
# Copyright (c) 2017,2018 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ =================== Isentropic Analysis =================== The MetPy function `mpcalc.isentropic_interpolation` allows for isentropic analysis from model analysis data in isobaric coordinates. """ ######################################## import cartopy.crs as ccrs import cartopy.feature as cfeature import matplotlib.pyplot as plt import numpy as np import xarray as xr import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo, add_timestamp from metpy.units import units ####################################### # **Getting the data** # # In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers # for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data) # will be used. data = xr.open_dataset(get_test_data('narr_example.nc', False)) ########################## print(list(data.variables)) ############################# # We will reduce the dimensionality of the data as it is pulled in to remove an empty time # dimension. # Assign data to variable names lat = data['lat'] lon = data['lon'] lev = data['isobaric'] times = data['time'] tmp = data['Temperature'][0] uwnd = data['u_wind'][0] vwnd = data['v_wind'][0] spech = data['Specific_humidity'][0] # pint doesn't understand gpm data['Geopotential_height'].attrs['units'] = 'meter' hgt = data['Geopotential_height'][0] ############################# # To properly interpolate to isentropic coordinates, the function must know the desired output # isentropic levels. An array with these levels will be created below. isentlevs = [296.] * units.kelvin #################################### # **Conversion to Isentropic Coordinates** # # Once three dimensional data in isobaric coordinates has been pulled and the desired # isentropic levels created, the conversion to isentropic coordinates can begin. Data will be # passed to the function as below. The function requires that isentropic levels, isobaric # levels, and temperature be input. Any additional inputs (in this case relative humidity, u, # and v wind components) will be linearly interpolated to isentropic space. isent_anal = mpcalc.isentropic_interpolation(isentlevs, lev, tmp, spech, uwnd, vwnd, hgt, tmpk_out=True) ##################################### # The output is a list, so now we will separate the variables to different names before # plotting. isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal isentu.ito('kt') isentv.ito('kt') ######################################## # A quick look at the shape of these variables will show that the data is now in isentropic # coordinates, with the number of vertical levels as specified above. print(isentprs.shape) print(isentspech.shape) print(isentu.shape) print(isentv.shape) print(isenttmp.shape) print(isenthgt.shape) ################################# # **Converting to Relative Humidity** # # The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will # have to be calculated after the interpolation to isentropic space. 
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs) ####################################### # **Plotting the Isentropic Analysis** # Set up our projection crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0) # Coordinates to limit map area bounds = [(-122., -75., 25., 50.)] # Choose a level to plot, in this case 296 K level = 0 fig = plt.figure(figsize=(17., 12.)) add_metpy_logo(fig, 120, 245, size='large') ax = fig.add_subplot(1, 1, 1, projection=crs) ax.set_extent(*bounds, crs=ccrs.PlateCarree()) ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75) ax.add_feature(cfeature.STATES, linewidth=0.5) # Plot the surface clevisent = np.arange(0, 1000, 25) cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent, colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree()) ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True, use_clabeltext=True) # Plot RH cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree()) cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05, extendrect='True') cb.set_label('Relative Humidity', size='x-large') # Plot wind barbs ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6, regrid_shape=20, transform=ccrs.PlateCarree()) # Make some titles ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)' .format(isentlevs[level].m), loc='left') add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True) fig.tight_layout() ###################################### # **Montgomery Streamfunction** # # The Montgomery Streamfunction, :math:`{\psi} = gdz + CpT`, is often desired because its # gradient is proportional to the geostrophic wind in isentropic space. This can be easily # calculated with `mpcalc.montgomery_streamfunction`. # Calculate Montgomery Streamfunction and scale by 10^-2 for plotting msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100. # Choose a level to plot, in this case 296 K level = 0 fig = plt.figure(figsize=(17., 12.)) add_metpy_logo(fig, 120, 250, size='large') ax = plt.subplot(111, projection=crs) ax.set_extent(*bounds, crs=ccrs.PlateCarree()) ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75) ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5) # Plot the surface clevmsf = np.arange(0, 4000, 5) cs = ax.contour(lon, lat, msf[level, :, :], clevmsf, colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree()) ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7, fmt='%i', rightside_up=True, use_clabeltext=True) # Plot RH cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5), cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree()) cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05, extendrect='True') cb.set_label('Relative Humidity', size='x-large') # Plot wind barbs. ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6, regrid_shape=20, transform=ccrs.PlateCarree()) # Make some titles ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) + r'($10^{-2} m^2 s^{-2}$), ' + 'Wind (kt), Relative Humidity (percent)', loc='left') add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True) fig.tight_layout() plt.show()
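# Rough single-point sketch (made-up values, not the NARR data above) of the
# Montgomery streamfunction psi = g*z + Cp*T computed by
# mpcalc.montgomery_streamfunction(height, temperature) as used above; whether
# scalar pint quantities are accepted here is an assumption, so treat this as
# illustrative rather than tested.
import metpy.calc as mpcalc
from metpy.units import units

hgt_pt = 1500. * units.meter    # geopotential height on the isentropic surface
tmp_pt = 280. * units.kelvin    # temperature on the isentropic surface
print(mpcalc.montgomery_streamfunction(hgt_pt, tmp_pt))  # ~2.96e5 m**2 / s**2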
bsd-3-clause
pglomski/shopnotes
drill_speed_chart.py
1
2778
#!/usr/bin/env python # -*- coding: UTF-8 -*- '''Produce a custom twist drill plot''' import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt plt.rc('text', usetex=True) # set some rcParams mpl.rcParams['font.weight'] = 'bold' mpl.rcParams['xtick.major.pad'] = 10 mpl.rcParams['xtick.direction'] = 'inout' mpl.rcParams['xtick.labelsize'] = 26 mpl.rcParams['ytick.direction'] = 'inout' mpl.rcParams['ytick.labelsize'] = 20 # define the constants for our chart materials = [ ('Acrylic' , 650 , 'c' , '-' ) , ('Aluminum' , 300 , 'b' , '-' ) , ('Brass' , 200 , 'g' , '-' ) , ('LC Steel' , 110 , 'k' , '-' ) , ('Wood' , 100 , 'brown' , '-' ) , ('MC Steel' , 80 , 'darkgray' , '-' ) , ('HC Steel' , 60 , 'lightgray' , '-' ) , ('Stainless' , 50 , 'purple' , '-' ) , ] drill_speeds = [250, 340, 390, 510, 600, 650, 990, 1550, 1620, 1900, 2620, 3100] #rpm speed_lims = (200., 4000.) # rpm max_in = 1. # in. incr = 1./16. # in. im_sz = 25. # in. ratio = 8.5/11. fig = plt.figure(figsize=(im_sz,ratio * im_sz), dpi=600) fig.patch.set_alpha(0) # generate a vector of drill bit diameter x = np.array([float(i) * incr for i in range(1,int(max_in/incr) + 1)]) # in. # calculate the drill speed curve for each material type and plot the curve for name, speed, color, linestyle in materials: plt.loglog(x, 12/np.pi/x*speed, label=name, linewidth=5, color=color, linestyle=linestyle) ax = plt.gca() # adjust the axis tick locators to match drill press speeds ax.yaxis.set_major_locator(mpl.ticker.FixedLocator(drill_speeds)) ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter('%4d')) ax.yaxis.set_minor_locator(mpl.ticker.NullLocator()) ax.set_ylim(speed_lims) # set the drill diameter locators and format the ticks with LaTeX ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(base=incr)) ax.xaxis.set_minor_locator(mpl.ticker.NullLocator()) ax.set_xlim((incr, max_in)) ticks = ['0', r'$$\frac{1}{16}$$' , r'$$\frac{1}{8}$$' , r'$$\frac{3}{16}$$' , r'$$\frac{1}{4}$$' , r'$$\frac{5}{16}$$' , r'$$\frac{3}{8}$$' , r'$$\frac{7}{16}$$' , r'$$\frac{1}{2}$$' , r'$$\frac{9}{16}$$' , r'$$\frac{5}{8}$$' , r'$$\frac{11}{16}$$' , r'$$\frac{3}{4}$$' , r'$$\frac{13}{16}$$' , r'$$\frac{7}{8}$$' , r'$$\frac{15}{16}$$' , r'$$1$$' ] ax.xaxis.set_ticklabels(ticks) # Add the Texts plt.xlabel('Bit Diameter (in.)', fontsize=26) plt.ylabel('Drill Speed (rpm)' , fontsize=26) plt.title('Twist Drill Speeds' , fontsize=50) plt.legend(ncol=2, loc=3, fontsize=40) plt.grid('on') plt.savefig('drill_speed_chart.png')
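# Standalone check of the curves plotted above. If the material constants are
# cutting speeds in surface feet per minute (an assumption consistent with the
# 12/pi/D factor in the loglog call), the spindle speed for a bit of diameter D
# inches is rpm = 12*sfm/(pi*D).
import math

def drill_rpm(sfm, diameter_in):
    """Spindle speed (rpm) from surface speed (ft/min) and bit diameter (in.)."""
    return 12.0 * sfm / (math.pi * diameter_in)

print(round(drill_rpm(300, 0.25)))  # ~4584 rpm for aluminum with a 1/4 in. bit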
agpl-3.0
crichardson17/starburst_atlas
Low_resolution_sims/DustFree_LowRes/Geneva_noRot_cont/Geneva_noRot_cont_age5/UV2.py
33
7365
import csv import matplotlib.pyplot as plt from numpy import * import scipy.interpolate import math from pylab import * from matplotlib.ticker import MultipleLocator, FormatStrFormatter import matplotlib.patches as patches from matplotlib.path import Path import os # ------------------------------------------------------------------------------------------------------ #inputs for file in os.listdir('.'): if file.endswith(".grd"): inputfile = file for file in os.listdir('.'): if file.endswith(".txt"): inputfile2 = file # ------------------------------------------------------------------------------------------------------ #Patches data #for the Kewley and Levesque data verts = [ (1., 7.97712125471966000000), # left, bottom (1., 9.57712125471966000000), # left, top (2., 10.57712125471970000000), # right, top (2., 8.97712125471966000000), # right, bottom (0., 0.), # ignored ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(verts, codes) # ------------------------ #for the Kewley 01 data verts2 = [ (2.4, 9.243038049), # left, bottom (2.4, 11.0211893), # left, top (2.6, 11.0211893), # right, top (2.6, 9.243038049), # right, bottom (0, 0.), # ignored ] path = Path(verts, codes) path2 = Path(verts2, codes) # ------------------------- #for the Moy et al data verts3 = [ (1., 6.86712125471966000000), # left, bottom (1., 10.18712125471970000000), # left, top (3., 12.18712125471970000000), # right, top (3., 8.86712125471966000000), # right, bottom (0., 0.), # ignored ] path = Path(verts, codes) path3 = Path(verts3, codes) # ------------------------------------------------------------------------------------------------------ #the routine to add patches for others peoples' data onto our plots. def add_patches(ax): patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0) patch2 = patches.PathPatch(path2, facecolor='green', lw=0) patch = patches.PathPatch(path, facecolor='red', lw=0) ax1.add_patch(patch3) ax1.add_patch(patch2) ax1.add_patch(patch) # ------------------------------------------------------------------------------------------------------ #the subplot routine def add_sub_plot(sub_num): numplots = 16 plt.subplot(numplots/4.,4,sub_num) rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear') zi = rbf(xi, yi) contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed') contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5) plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*') plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10) plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10) if sub_num == numplots / 2.: print "half the plots are complete" #axis limits yt_min = 8 yt_max = 23 xt_min = 0 xt_max = 12 plt.ylim(yt_min,yt_max) plt.xlim(xt_min,xt_max) plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10) plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10) if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]: plt.tick_params(labelleft = 'off') else: plt.tick_params(labelleft = 'on') plt.ylabel('Log ($ \phi _{\mathrm{H}} $)') if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]: plt.tick_params(labelbottom = 'off') else: plt.tick_params(labelbottom = 'on') plt.xlabel('Log($n _{\mathrm{H}} $)') if sub_num == 1: plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10) if sub_num == 13: 
plt.yticks(arange(yt_min,yt_max,1),fontsize=10) plt.xticks(arange(xt_min,xt_max,1), fontsize = 10) if sub_num == 16 : plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10) # --------------------------------------------------- #this is where the grid information (phi and hdens) is read in and saved to grid. grid = []; with open(inputfile, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') for row in csvReader: grid.append(row); grid = asarray(grid) #here is where the data for each line is read in and saved to dataEmissionlines dataEmissionlines = []; with open(inputfile2, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') headers = csvReader.next() for row in csvReader: dataEmissionlines.append(row); dataEmissionlines = asarray(dataEmissionlines) print "import files complete" # --------------------------------------------------- #for grid phi_values = grid[1:len(dataEmissionlines)+1,6] hdens_values = grid[1:len(dataEmissionlines)+1,7] #for lines headers = headers[1:] Emissionlines = dataEmissionlines[:, 1:] concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0]))) max_values = zeros((len(Emissionlines[0]),4)) #select the scaling factor #for 1215 #incident = Emissionlines[1:,4] #for 4860 incident = Emissionlines[:,57] #take the ratio of incident and all the lines and put it all in an array concatenated_data for i in range(len(Emissionlines)): for j in range(len(Emissionlines[0])): if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0: concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) else: concatenated_data[i,j] == 0 # for 1215 #for i in range(len(Emissionlines)): # for j in range(len(Emissionlines[0])): # if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0: # concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) # else: # concatenated_data[i,j] == 0 #find the maxima to plot onto the contour plots for j in range(len(concatenated_data[0])): max_values[j,0] = max(concatenated_data[:,j]) max_values[j,1] = argmax(concatenated_data[:,j], axis = 0) max_values[j,2] = hdens_values[max_values[j,1]] max_values[j,3] = phi_values[max_values[j,1]] #to round off the maxima max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ] print "data arranged" # --------------------------------------------------- #Creating the grid to interpolate with for contours. gridarray = zeros((len(Emissionlines),2)) gridarray[:,0] = hdens_values gridarray[:,1] = phi_values x = gridarray[:,0] y = gridarray[:,1] #change desired lines here! line = [18, #1549 19, #1640 20, #1665 21, #1671 23, #1750 24, #1860 25, #1888 26, #1907 27, #2297 28, #2321 29, #2471 30, #2326 31, #2335 32, #2665 33, #2798 34] #2803 #create z array for this plot z = concatenated_data[:,line[:]] # --------------------------------------------------- # Interpolate print "starting interpolation" xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10) xi, yi = meshgrid(xi, yi) # --------------------------------------------------- print "interpolatation complete; now plotting" #plot plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots levels = arange(10**-1,10, .2) levels2 = arange(10**-2,10**2, 1) plt.suptitle("UV Lines Continued", fontsize=14) # --------------------------------------------------- for i in range(16): add_sub_plot(i) ax1 = plt.subplot(4,4,1) add_patches(ax1) print "complete" plt.savefig('UV_Lines_cntd.pdf') plt.clf()
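# Minimal synthetic sketch (random data, not the emission-line grid above) of the
# scipy.interpolate.Rbf step used in add_sub_plot: fit a radial basis function to
# scattered (x, y, z) samples, then evaluate it on a regular mesh for contouring.
import numpy as np
import scipy.interpolate

rng = np.random.RandomState(0)
xs = rng.uniform(0.0, 12.0, 60)
ys = rng.uniform(8.0, 23.0, 60)
zs = np.sin(xs / 3.0) + 0.05 * ys
rbf_demo = scipy.interpolate.Rbf(xs, ys, zs, function='linear')
xi_demo, yi_demo = np.meshgrid(np.linspace(0.0, 12.0, 10), np.linspace(8.0, 23.0, 10))
print(rbf_demo(xi_demo, yi_demo).shape)  # (10, 10)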
gpl-2.0
giorgiop/scikit-learn
sklearn/linear_model/__init__.py
83
3139
""" The :mod:`sklearn.linear_model` module implements generalized linear models. It includes Ridge regression, Bayesian Regression, Lasso and Elastic Net estimators computed with Least Angle Regression and coordinate descent. It also implements Stochastic Gradient Descent related algorithms. """ # See http://scikit-learn.sourceforge.net/modules/sgd.html and # http://scikit-learn.sourceforge.net/modules/linear_model.html for # complete documentation. from .base import LinearRegression from .bayes import BayesianRidge, ARDRegression from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV, LassoLarsIC) from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV, lasso_path, enet_path, MultiTaskLasso, MultiTaskElasticNet, MultiTaskElasticNetCV, MultiTaskLassoCV) from .huber import HuberRegressor from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber from .stochastic_gradient import SGDClassifier, SGDRegressor from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV, ridge_regression) from .logistic import (LogisticRegression, LogisticRegressionCV, logistic_regression_path) from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit, OrthogonalMatchingPursuitCV) from .passive_aggressive import PassiveAggressiveClassifier from .passive_aggressive import PassiveAggressiveRegressor from .perceptron import Perceptron from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression, lasso_stability_path) from .ransac import RANSACRegressor from .theil_sen import TheilSenRegressor __all__ = ['ARDRegression', 'BayesianRidge', 'ElasticNet', 'ElasticNetCV', 'Hinge', 'HuberRegressor', 'Lars', 'LarsCV', 'Lasso', 'LassoCV', 'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'LinearRegression', 'Log', 'LogisticRegression', 'LogisticRegressionCV', 'ModifiedHuber', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV', 'PassiveAggressiveClassifier', 'PassiveAggressiveRegressor', 'Perceptron', 'RandomizedLasso', 'RandomizedLogisticRegression', 'Ridge', 'RidgeCV', 'RidgeClassifier', 'RidgeClassifierCV', 'SGDClassifier', 'SGDRegressor', 'SquaredLoss', 'TheilSenRegressor', 'enet_path', 'lars_path', 'lasso_path', 'lasso_stability_path', 'logistic_regression_path', 'orthogonal_mp', 'orthogonal_mp_gram', 'ridge_regression', 'RANSACRegressor']
bsd-3-clause
BhallaLab/moose-full
moose-examples/snippets/switchKineticSolvers.py
2
5089
######################################################################### ## This program is part of 'MOOSE', the ## Messaging Object Oriented Simulation Environment. ## Copyright (C) 2014 Upinder S. Bhalla. and NCBS ## It is made available under the terms of the ## GNU Lesser General Public License version 2.1 ## See the file COPYING.LIB for the full notice. ######################################################################### import moose import pylab import numpy import matplotlib.pyplot as plt import sys def runAndSavePlots( name ): runtime = 20.0 moose.reinit() moose.start( runtime ) pa = moose.Neutral( '/model/graphs/' + name ) for x in moose.wildcardFind( '/model/#graphs/conc#/#' ): if ( x.tick != -1 ): tabname = '/model/graphs/' + name + '/' + x.name + '.' + name y = moose.Table( tabname ) y.vector = x.vector y.tick = -1 # Takes args ee, gsl, or gssa def switchSolvers( solver ): if ( moose.exists( 'model/kinetics/stoich' ) ): moose.delete( '/model/kinetics/stoich' ) moose.delete( '/model/kinetics/ksolve' ) compt = moose.element( '/model/kinetics' ) if ( solver == 'gsl' ): ksolve = moose.Ksolve( '/model/kinetics/ksolve' ) if ( solver == 'gssa' ): ksolve = moose.Gsolve( '/model/kinetics/ksolve' ) if ( solver != 'ee' ): stoich = moose.Stoich( '/model/kinetics/stoich' ) stoich.compartment = compt stoich.ksolve = ksolve stoich.path = "/model/kinetics/##" def main(): """ At zero order, you can select the solver you want to use within the function moose.loadModel( filename, modelpath, solver ). Having loaded in the model, you can change the solver to use on it. This example illustrates how to assign and change solvers for a kinetic model. This process is necessary in two situations: * If we want to change the numerical method employed, for example, from deterministic to stochastic. * If we are already using a solver, and we have changed the reaction network by adding or removing molecules or reactions. Note that we do not have to change the solvers if the volume or reaction rates change. In this example the model is loaded in with a gsl solver. The sequence of solver calculations is: #. gsl #. ee #. gsl #. gssa #. gsl If you're removing the solvers, you just delete the stoichiometry object and the associated ksolve/gsolve. Should there be diffusion (a dsolve)then you should delete that too. If you're building the solvers up again, then you must do the following steps in order: #. build up the ksolve/gsolve and stoich (any order) #. Assign stoich.ksolve #. Assign stoich.path. See the Reaction-diffusion section should you want to do diffusion as well. """ solver = "gsl" # Pick any of gsl, gssa, ee.. mfile = '../genesis/kkit_objects_example.g' modelId = moose.loadModel( mfile, 'model', solver ) # Increase volume so that the stochastic solver gssa # gives an interesting output compt = moose.element( '/model/kinetics' ) compt.volume = 1e-19 runAndSavePlots( 'gsl' ) ######################################################### switchSolvers( 'ee' ) runAndSavePlots( 'ee' ) ######################################################### switchSolvers( 'gsl' ) runAndSavePlots( 'gsl2' ) ######################################################### switchSolvers( 'gssa' ) runAndSavePlots( 'gssa' ) ######################################################### switchSolvers( 'gsl' ) runAndSavePlots( 'gsl3' ) ######################################################### # Display all plots. 
fig = plt.figure( figsize = (12, 10) ) orig = fig.add_subplot( 511 ) gsl = fig.add_subplot( 512 ) ee = fig.add_subplot( 513 ) gsl2 = fig.add_subplot( 514 ) gssa = fig.add_subplot( 515 ) plotdt = moose.element( '/clock' ).tickDt[18] for x in moose.wildcardFind( '/model/#graphs/conc#/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt orig.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gsl/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gsl.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/ee/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt ee.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gsl2/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gsl2.plot( t, x.vector, label=x.name ) for x in moose.wildcardFind( '/model/graphs/gssa/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * plotdt gssa.plot( t, x.vector, label=x.name ) plt.legend() pylab.show() quit() # Run the 'main' if this script is executed standalone. if __name__ == '__main__': main()
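# Minimal sketch of the rebuild order described in the main() docstring, using only
# calls that already appear in switchSolvers above; it assumes a model has been
# loaded so that '/model/kinetics' exists and currently holds no solver:
# 1) build the ksolve/gsolve and stoich, 2) assign stoich.ksolve, 3) assign stoich.path.
import moose

compt = moose.element('/model/kinetics')
ksolve = moose.Ksolve('/model/kinetics/ksolve')   # or moose.Gsolve(...) for stochastic
stoich = moose.Stoich('/model/kinetics/stoich')
stoich.compartment = compt
stoich.ksolve = ksolve
stoich.path = '/model/kinetics/##'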
gpl-2.0
phev8/ward-metrics
wardmetrics/visualisations.py
1
16641
import matplotlib.pyplot as plt def plot_events_with_segment_scores(segment_results, ground_truth_events, detected_events, use_datetime_x=False, show=True): """ Test :param segment_results: :param ground_truth_events: :param detected_events: :param use_datetime_x: :param show: :return: """ fig = plt.figure(figsize=(10, 3)) a = 3 # TODO: convert times to datetime if flag is set # write y axis labels for ground truth and detections plt.yticks([0.2, 0.5, 0.8], ["detections", "segment score", "actual events"]) plt.ylim([0, 1]) for d in detected_events: plt.axvspan(d[0], d[1], 0, 0.5) for gt in ground_truth_events: plt.axvspan(gt[0], gt[1], 0.5, 1) for s in segment_results: color = "black" index_of_cat = 4 if s[index_of_cat] == "TP": color = "green" elif s[index_of_cat] == "FP": color = "red" elif s[index_of_cat] == "FN": color = "yellow" elif s[index_of_cat] == "TN": color = "blue" # TODO: format text nicely plt.text((s[1]+s[0])/2, 0.8, s[2], horizontalalignment='center', verticalalignment='center') plt.text((s[1]+s[0])/2, 0.2, s[3], horizontalalignment='center', verticalalignment='center') plt.text((s[1]+s[0])/2, 0.5, s[5], horizontalalignment='center', verticalalignment='center') plt.axvspan(s[0], s[1], 0.4, 0.6, color=color) plt.axvline(s[0], color="black") plt.axvline(s[1], color="black") plt.tight_layout() if show: plt.show() else: plt.draw() def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True): fig = plt.figure(figsize=(10, 3)) for i in range(len(detected_events)): d = detected_events[i] plt.axvspan(d[0], d[1], 0, 0.5) plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i], horizontalalignment='center', verticalalignment='center') for i in range(len(ground_truth_events)): gt = ground_truth_events[i] plt.axvspan(gt[0], gt[1], 0.5, 1) plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i], horizontalalignment='center', verticalalignment='center') plt.tight_layout() if show: plt.show() else: plt.draw() def plot_twoset_metrics(results, startangle=120): fig1, axarr = plt.subplots(1, 2) # plot positive rates: labels_1 = ["tpr", "us", "ue", "fr", "dr"] values_1 = [ results["tpr"], results["us"], results["ue"], results["fr"], results["dr"] ] axarr[0].pie(values_1, labels=labels_1, autopct='%1.0f%%', startangle=startangle) axarr[0].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. # TODO: add title # plot negative rates: labels_2 = ["1-fpr", "os", "oe", "mr", "ir"] values_2 = [ 1-results["fpr"], results["os"], results["oe"], results["mr"], results["ir"] ] axarr[1].pie(values_2, labels=labels_2, autopct='%1.0f%%', startangle=startangle) axarr[1].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. # TODO: add title plt.show() def plot_segment_counts(results): # TODO: add title labels = results.keys() values = [] for label in labels: values.append(results[label]) #explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs') total = sum(values) fig1, ax1 = plt.subplots() ax1.pie(values, labels=labels, autopct=lambda p: '{:.0f}'.format(p * total / 100), startangle=90) ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle. plt.show() def plot_event_analysis_diagram(event_results, **kwargs): """ Plot the event analysis diagram (EAD) for the given results Visualisation of the distribution of specific error types either with the actual event count or showing the percentage of the total events. Elements of the plot can be adjusted (like color, fontsize etc.) 
Args: event_results (dictionary): Dictionary containing event counts for "total_gt", "total_det", "D", "F", "FM", "M", "C", "M'", "FM'", "F'", "I'" as returned by core_methods.event_metrics' third value Keyword Arguments: fontsize (int): Size of the text inside the bar plot (Reduce the value if some event types are too short) use_percentage (bool): whether percentage values or to show actual event counts on the chart (default: False) show (bool): whether to call plt.show (blocking) or plt.draw() for later displaying (default: True) color_deletion: any matplotlib color for deletion events color_fragmented: any matplotlib color for fragmented ground truth events color_fragmented_merged: any matplotlib color for merged and fragmented ground truth events color_merged: any matplotlib color for merged ground truth events color_correct: any matplotlib color for correct events color_merging: any matplotlib color for merging detection events color_merging_fragmenting: any matplotlib color for merging and fragmenting detection events color_fragmenting: any matplotlib color for merging detection events color_insertion: any matplotlib color for insertion events Returns: matplotlib Figure: matplotlib figure reference """ fig = plt.figure(figsize=(10, 2)) total = event_results["total_gt"] + event_results["total_det"] - event_results["C"] # Layout settings: y_min = 0.3 y_max = 0.7 width = 0.02 text_x_offset = 0 text_y_pos_1 = 0.55 text_y_pos_2 = 0.4 fontsize = kwargs.pop('fontsize', 10) fontsize_extern = 12 use_percentage = kwargs.pop('use_percentage', False) # Color settings: cmap = plt.get_cmap("Paired") color_deletion = kwargs.pop('color_deletion', cmap(4)) color_fragmented = kwargs.pop('color_fragmented', cmap(6)) color_fragmented_merged = kwargs.pop('color_fragmented_merged', cmap(0)) color_merged = kwargs.pop('color_merged', cmap(8)) color_correct = kwargs.pop('color_correct', cmap(3)) color_merging = kwargs.pop('color_merging', cmap(9)) color_merging_fragmenting = kwargs.pop('color_merging_fragmenting', cmap(1)) color_fragmenting = kwargs.pop('color_fragmenting', cmap(7)) color_insertion = kwargs.pop('color_insertion', cmap(5)) # Show deletions: current_score = "D" current_x_start = 0 current_x_end = event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_deletion) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score]*100/event_results["total_gt"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show fragmented events: current_score = "F" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, 
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show fragmented and merged events: current_score = "FM" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented_merged) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show merged events: current_score = "M" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merged) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show correct events: current_score = "C" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_correct) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%/" + "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show merging detections: current_score = "M'" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - 
text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show fragmenting and merging detections: current_score = "FM'" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging_fragmenting) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show fragmenting detections: current_score = "F'" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmenting) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Show insertions: current_score = "I'" current_x_start = current_x_end current_x_end += event_results[current_score] plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_insertion) if event_results[current_score] > 0: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize, horizontalalignment='center', verticalalignment='center') if use_percentage: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%", fontsize=fontsize, horizontalalignment='center', verticalalignment='center') else: plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]), fontsize=fontsize, horizontalalignment='center', verticalalignment='center') # Draw line for total events: plt.axvspan(0, event_results["total_gt"], y_max, y_max + width, color="black") plt.axvspan( total - event_results["total_det"], total, y_min, y_min - width, color="black") plt.text((0 + event_results["total_gt"]) / 2, 0.8, "Actual events (total=" + str(event_results["total_gt"]) + ")", fontsize=fontsize_extern, horizontalalignment='center', verticalalignment='center') plt.text((2*total - event_results["total_det"]) / 2, 0.18, "Detected events (total=" + str(event_results["total_det"]) + 
")", horizontalalignment='center', fontsize=fontsize_extern, verticalalignment='center') plt.tight_layout() if kwargs.pop('show', True): plt.show() else: plt.draw() return fig
mit
CCI-Tools/cate-core
cate/ops/index.py
1
8641
# The MIT License (MIT) # Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies # of the Software, and to permit persons to whom the Software is furnished to do # so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Description =========== Index calculation operations Functions ========= """ import xarray as xr import pandas as pd from cate.core.op import op, op_input from cate.ops.select import select_var from cate.ops.subset import subset_spatial from cate.ops.anomaly import anomaly_external from cate.core.types import PolygonLike, VarName, ValidationError from cate.util.monitor import Monitor _ALL_FILE_FILTER = dict(name='All Files', extensions=['*']) @op(tags=['index']) @op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER]) @op_input('var', value_set_source='ds', data_type=VarName) def enso_nino34(ds: xr.Dataset, var: VarName.TYPE, file: str, threshold: float = None, monitor: Monitor = Monitor.NONE) -> pd.DataFrame: """ Calculate nino34 index, which is defined as a five month running mean of anomalies of monthly means of SST data in Nino3.4 region:: lon_min=-170 lat_min=-5 lon_max=-120 lat_max=5. :param ds: A monthly SST dataset :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset can be generated using the long_term_average operation :param var: Dataset variable (geophysial quantity) to use for index calculation. :param threshold: If given, boolean El Nino/La Nina timeseries will be calculated and added to the output dataset according to the given threshold. Where anomaly larger than the positive value of the threshold indicates El Nino and anomaly smaller than the negative of the given threshold indicates La Nina. :param monitor: a progress monitor. :return: A dataset that contains the index timeseries. 
""" n34 = '-170, -5, -120, 5' name = 'ENSO N3.4 Index' return _generic_index_calculation(ds, var, n34, 5, file, name, threshold, monitor) @op(tags=['index']) @op_input('var', value_set_source='ds', data_type=VarName) @op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER]) @op_input('region', value_set=['N1+2', 'N3', 'N34', 'N4', 'custom']) @op_input('custom_region', data_type=PolygonLike) def enso(ds: xr.Dataset, var: VarName.TYPE, file: str, region: str = 'n34', custom_region: PolygonLike.TYPE = None, threshold: float = None, monitor: Monitor = Monitor.NONE) -> pd.DataFrame: """ Calculate ENSO index, which is defined as a five month running mean of anomalies of monthly means of SST data in the given region. :param ds: A monthly SST dataset :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset can be generated using the long_term_average operation :param var: Dataset variable to use for index calculation :param region: Region for index calculation, the default is Nino3.4 :param custom_region: If 'custom' is chosen as the 'region', this parameter has to be provided to set the desired region. :param threshold: If given, boolean El Nino/La Nina timeseries will be calculated and added to the output dataset, according to the given threshold. Where anomaly larger than then positive value of the threshold indicates El Nino and anomaly smaller than the negative of the given threshold indicates La Nina. :param monitor: a progress monitor. :return: A dataset that contains the index timeseries. """ regions = {'N1+2': '-90, -10, -80, 0', 'N3': '-150, -5, -90, 5', 'N3.4': '-170, -5, -120, 5', 'N4': '160, -5, -150, 5', 'custom': custom_region} converted_region = PolygonLike.convert(regions[region]) if not converted_region: raise ValidationError('No region has been provided to ENSO index calculation') name = 'ENSO ' + region + ' Index' if 'custom' == region: name = 'ENSO Index over ' + PolygonLike.format(converted_region) return _generic_index_calculation(ds, var, converted_region, 5, file, name, threshold, monitor) @op(tags=['index']) @op_input('var', value_set_source='ds', data_type=VarName) @op_input('file', file_open_mode='r', file_filters=[dict(name='NetCDF', extensions=['nc']), _ALL_FILE_FILTER]) def oni(ds: xr.Dataset, var: VarName.TYPE, file: str, threshold: float = None, monitor: Monitor = Monitor.NONE) -> pd.DataFrame: """ Calculate ONI index, which is defined as a three month running mean of anomalies of monthly means of SST data in the Nino3.4 region. :param ds: A monthly SST dataset :param file: Path to the reference data file e.g. a climatology. A suitable reference dataset can be generated using the long_term_average operation :param var: Dataset variable to use for index calculation :param threshold: If given, boolean El Nino/La Nina timeseries will be calculated and added to the output dataset, according to the given threshold. Where anomaly larger than then positive value of the threshold indicates El Nino and anomaly smaller than the negative of the given threshold indicates La Nina. :param monitor: a progress monitor. 
    :return: A dataset that contains the index timeseries
    """
    n34 = '-170, -5, -120, 5'
    name = 'ONI Index'
    return _generic_index_calculation(ds, var, n34, 3, file, name,
                                      threshold, monitor)


def _generic_index_calculation(ds: xr.Dataset,
                               var: VarName.TYPE,
                               region: PolygonLike.TYPE,
                               window: int,
                               file: str,
                               name: str,
                               threshold: float = None,
                               monitor: Monitor = Monitor.NONE) -> pd.DataFrame:
    """
    A generic index calculation, where the index is defined as an anomaly,
    against the given reference, of a moving average (with the given window
    size) over the given region of the given variable of the given dataset.

    :param ds: Dataset from which to calculate the index
    :param var: Variable from which to calculate index
    :param region: Spatial subset from which to calculate the index
    :param window: Window size for the moving average
    :param file: Path to the reference file
    :param threshold: Absolute threshold that indicates an ENSO event
    :param name: Name of the index
    :param monitor: a progress monitor.
    :return: A dataset that contains the index timeseries
    """
    var = VarName.convert(var)
    region = PolygonLike.convert(region)

    with monitor.starting("Calculate the index", total_work=2):
        ds = select_var(ds, var)
        ds_subset = subset_spatial(ds, region)
        anom = anomaly_external(ds_subset, file, monitor=monitor.child(1))
        with monitor.child(1).observing("Calculate mean"):
            ts = anom.mean(dim=['lat', 'lon'])

    df = pd.DataFrame(data=ts[var].values, columns=[name], index=ts.time.values)
    retval = df.rolling(window=window, center=True).mean().dropna()

    if threshold is None:
        return retval

    retval['El Nino'] = pd.Series((retval[name] > threshold),
                                  index=retval.index)
    retval['La Nina'] = pd.Series((retval[name] < -threshold),
                                  index=retval.index)
    return retval
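# --- Editor's usage sketch (hedged; not part of the original module) ---
# Illustrates how the index operations above might be called from a plain
# script.  The file names are placeholders: a real call needs a monthly SST
# dataset and a matching reference climatology (e.g. produced with the
# long_term_average operation), as described in the docstrings above.
if __name__ == '__main__':
    sst_ds = xr.open_dataset('sst_monthly.nc')         # placeholder path
    nino34 = enso_nino34(sst_ds, var='sst',
                         file='sst_climatology.nc',     # placeholder path
                         threshold=0.5)
    # With a threshold given, the DataFrame also carries boolean
    # 'El Nino' / 'La Nina' columns.
    print(nino34.head())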
mit
eickenberg/scikit-learn
sklearn/cluster/tests/test_mean_shift.py
19
2844
""" Testing for mean shift clustering methods """ import numpy as np from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.cluster import MeanShift from sklearn.cluster import mean_shift from sklearn.cluster import estimate_bandwidth from sklearn.cluster import get_bin_seeds from sklearn.datasets.samples_generator import make_blobs n_clusters = 3 centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10 X, _ = make_blobs(n_samples=300, n_features=2, centers=centers, cluster_std=0.4, shuffle=True, random_state=11) def test_estimate_bandwidth(): """Test estimate_bandwidth""" bandwidth = estimate_bandwidth(X, n_samples=200) assert_true(0.9 <= bandwidth <= 1.5) def test_mean_shift(): """ Test MeanShift algorithm """ bandwidth = 1.2 ms = MeanShift(bandwidth=bandwidth) labels = ms.fit(X).labels_ cluster_centers = ms.cluster_centers_ labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) assert_equal(n_clusters_, n_clusters) cluster_centers, labels = mean_shift(X, bandwidth=bandwidth) labels_unique = np.unique(labels) n_clusters_ = len(labels_unique) assert_equal(n_clusters_, n_clusters) def test_meanshift_predict(): """Test MeanShift.predict""" ms = MeanShift(bandwidth=1.2) labels = ms.fit_predict(X) labels2 = ms.predict(X) assert_array_equal(labels, labels2) def test_unfitted(): """Non-regression: before fit, there should be not fitted attributes.""" ms = MeanShift() assert_false(hasattr(ms, "cluster_centers_")) assert_false(hasattr(ms, "labels_")) def test_bin_seeds(): """ Test the bin seeding technique which can be used in the mean shift algorithm """ # Data is just 6 points in the plane X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2], [2., 1.], [2.1, 1.1], [0., 0.]]) # With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be # found ground_truth = set([(1., 1.), (2., 1.), (0., 0.)]) test_bins = get_bin_seeds(X, 1, 1) test_result = set([tuple(p) for p in test_bins]) assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) # With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be # found ground_truth = set([(1., 1.), (2., 1.)]) test_bins = get_bin_seeds(X, 1, 2) test_result = set([tuple(p) for p in test_bins]) assert_true(len(ground_truth.symmetric_difference(test_result)) == 0) # With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found test_bins = get_bin_seeds(X, 0.01, 1) test_result = set([tuple(p) for p in test_bins]) assert_true(len(test_result) == 6)
bsd-3-clause
jseabold/statsmodels
examples/python/contrasts.py
5
9020
# coding: utf-8 # DO NOT EDIT # Autogenerated from the notebook contrasts.ipynb. # Edit the notebook and then sync the output with this file. # # flake8: noqa # DO NOT EDIT # # Contrasts Overview import numpy as np import statsmodels.api as sm # This document is based heavily on this excellent resource from UCLA # http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm # A categorical variable of K categories, or levels, usually enters a # regression as a sequence of K-1 dummy variables. This amounts to a linear # hypothesis on the level means. That is, each test statistic for these # variables amounts to testing whether the mean for that level is # statistically significantly different from the mean of the base category. # This dummy coding is called Treatment coding in R parlance, and we will # follow this convention. There are, however, different coding methods that # amount to different sets of linear hypotheses. # # In fact, the dummy coding is not technically a contrast coding. This is # because the dummy variables add to one and are not functionally # independent of the model's intercept. On the other hand, a set of # *contrasts* for a categorical variable with `k` levels is a set of `k-1` # functionally independent linear combinations of the factor level means # that are also independent of the sum of the dummy variables. The dummy # coding is not wrong *per se*. It captures all of the coefficients, but it # complicates matters when the model assumes independence of the # coefficients such as in ANOVA. Linear regression models do not assume # independence of the coefficients and thus dummy coding is often the only # coding that is taught in this context. # # To have a look at the contrast matrices in Patsy, we will use data from # UCLA ATS. First let's load the data. # #### Example Data import pandas as pd url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv' hsb2 = pd.read_table(url, delimiter=",") hsb2.head(10) # It will be instructive to look at the mean of the dependent variable, # write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African # American and 4 = Caucasian)). hsb2.groupby('race')['write'].mean() # #### Treatment (Dummy) Coding # Dummy coding is likely the most well known coding scheme. It compares # each level of the categorical variable to a base reference level. The base # reference level is the value of the intercept. It is the default contrast # in Patsy for unordered categorical factors. The Treatment contrast matrix # for race would be from patsy.contrasts import Treatment levels = [1, 2, 3, 4] contrast = Treatment(reference=0).code_without_intercept(levels) print(contrast.matrix) # Here we used `reference=0`, which implies that the first level, # Hispanic, is the reference category against which the other level effects # are measured. As mentioned above, the columns do not sum to zero and are # thus not independent of the intercept. To be explicit, let's look at how # this would encode the `race` variable. hsb2.race.head(10) print(contrast.matrix[hsb2.race - 1, :][:20]) sm.categorical(hsb2.race.values) # This is a bit of a trick, as the `race` category conveniently maps to # zero-based indices. If it does not, this conversion happens under the # hood, so this will not work in general but nonetheless is a useful exercise # to fix ideas. 
The below illustrates the output using the three contrasts # above from statsmodels.formula.api import ols mod = ols("write ~ C(race, Treatment)", data=hsb2) res = mod.fit() print(res.summary()) # We explicitly gave the contrast for race; however, since Treatment is # the default, we could have omitted this. # ### Simple Coding # Like Treatment Coding, Simple Coding compares each level to a fixed # reference level. However, with simple coding, the intercept is the grand # mean of all the levels of the factors. Patsy does not have the Simple # contrast included, but you can easily define your own contrasts. To do so, # write a class that contains a code_with_intercept and a # code_without_intercept method that returns a patsy.contrast.ContrastMatrix # instance from patsy.contrasts import ContrastMatrix def _name_levels(prefix, levels): return ["[%s%s]" % (prefix, level) for level in levels] class Simple(object): def _simple_contrast(self, levels): nlevels = len(levels) contr = -1. / nlevels * np.ones((nlevels, nlevels - 1)) contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.) / nlevels return contr def code_with_intercept(self, levels): contrast = np.column_stack((np.ones(len(levels)), self._simple_contrast(levels))) return ContrastMatrix(contrast, _name_levels("Simp.", levels)) def code_without_intercept(self, levels): contrast = self._simple_contrast(levels) return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1])) hsb2.groupby('race')['write'].mean().mean() contrast = Simple().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Simple)", data=hsb2) res = mod.fit() print(res.summary()) # ### Sum (Deviation) Coding # Sum coding compares the mean of the dependent variable for a given level # to the overall mean of the dependent variable over all the levels. That # is, it uses contrasts between each of the first k-1 levels and level k In # this example, level 1 is compared to all the others, level 2 to all the # others, and level 3 to all the others. from patsy.contrasts import Sum contrast = Sum().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Sum)", data=hsb2) res = mod.fit() print(res.summary()) # This corresponds to a parameterization that forces all the coefficients # to sum to zero. Notice that the intercept here is the grand mean where the # grand mean is the mean of means of the dependent variable by each level. hsb2.groupby('race')['write'].mean().mean() # ### Backward Difference Coding # In backward difference coding, the mean of the dependent variable for a # level is compared with the mean of the dependent variable for the prior # level. This type of coding may be useful for a nominal or an ordinal # variable. from patsy.contrasts import Diff contrast = Diff().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Diff)", data=hsb2) res = mod.fit() print(res.summary()) # For example, here the coefficient on level 1 is the mean of `write` at # level 2 compared with the mean at level 1. Ie., res.params["C(race, Diff)[D.1]"] hsb2.groupby('race').mean()["write"][2] - hsb2.groupby( 'race').mean()["write"][1] # ### Helmert Coding # Our version of Helmert coding is sometimes referred to as Reverse # Helmert Coding. The mean of the dependent variable for a level is compared # to the mean of the dependent variable over all previous levels. Hence, the # name 'reverse' being sometimes applied to differentiate from forward # Helmert coding. 
This comparison does not make much sense for a nominal # variable such as race, but we would use the Helmert contrast like so: from patsy.contrasts import Helmert contrast = Helmert().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(race, Helmert)", data=hsb2) res = mod.fit() print(res.summary()) # To illustrate, the comparison on level 4 is the mean of the dependent # variable at the previous three levels taken from the mean at level 4 grouped = hsb2.groupby('race') grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean() # As you can see, these are only equal up to a constant. Other versions of # the Helmert contrast give the actual difference in means. Regardless, the # hypothesis tests are the same. k = 4 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) k = 3 1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean()) # ### Orthogonal Polynomial Coding # The coefficients taken on by polynomial coding for `k=4` levels are the # linear, quadratic, and cubic trends in the categorical variable. The # categorical variable here is assumed to be represented by an underlying, # equally spaced numeric variable. Therefore, this type of encoding is used # only for ordered categorical variables with equal spacing. In general, the # polynomial contrast produces polynomials of order `k-1`. Since `race` is # not an ordered factor variable let's use `read` as an example. First we # need to create an ordered categorical from `read`. hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3)) hsb2.groupby('readcat').mean()['write'] from patsy.contrasts import Poly levels = hsb2.readcat.unique().tolist() contrast = Poly().code_without_intercept(levels) print(contrast.matrix) mod = ols("write ~ C(readcat, Poly)", data=hsb2) res = mod.fit() print(res.summary()) # As you can see, readcat has a significant linear effect on the dependent # variable `write` but not a significant quadratic or cubic effect.
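# --- Editor's verification sketch (hedged; appended, not part of the
# original notebook export).  It re-fits the Treatment-coded model and checks
# that intercept + dummy coefficient reproduces each level mean of `write`,
# which is the interpretation given in the Treatment section above.
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
level_means = hsb2.groupby('race')['write'].mean()
print("reference (Hispanic) mean:", res.params['Intercept'])
for level in (2, 3, 4):
    reconstructed = (res.params['Intercept']
                     + res.params['C(race, Treatment)[T.%d]' % level])
    print(level, reconstructed, level_means[level])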
bsd-3-clause
alexsavio/scikit-learn
examples/gaussian_process/plot_gpc_iris.py
81
2231
""" ===================================================== Gaussian process classification (GPC) on iris dataset ===================================================== This example illustrates the predicted probability of GPC for an isotropic and anisotropic RBF kernel on a two-dimensional version for the iris-dataset. The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by assigning different length-scales to the two feature dimensions. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. y = np.array(iris.target, dtype=int) h = .02 # step size in the mesh kernel = 1.0 * RBF([1.0]) gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) kernel = 1.0 * RBF([1.0, 1.0]) gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) titles = ["Isotropic RBF", "Anisotropic RBF"] plt.figure(figsize=(10, 5)) for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)): # Plot the predicted probabilities. For that, we will assign a color to # each point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 2, i + 1) Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape((xx.shape[0], xx.shape[1], 3)) plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower") # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y]) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title("%s, LML: %.3f" % (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta))) plt.tight_layout() plt.show()
bsd-3-clause
wkerzendorf/wsynphot
wsynphot/base.py
1
15987
# defining the base filter curve classes import os from scipy import interpolate from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D import pandas as pd from wsynphot.io.cache_filters import load_filter_index, load_transmission_data from astropy import units as u, constants as const from astropy import utils import numpy as np from wsynphot.calibration import get_vega_calibration_spectrum def calculate_filter_flux_density(spectrum, filter): """ Calculate the average flux through the filter by evaluating the integral ..math:: f_lambda = \\frac{\\int_}{} Parameters ---------- spectrum: ~specutils.Spectrum1D spectrum object filter: ~wsynphot.FilterCurve :return: """ filtered_spectrum = filter * spectrum filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength, filtered_spectrum.wavelength) return filter_flux_density def calculate_vega_magnitude(spectrum, filter): filter_flux_density = calculate_filter_flux_density(spectrum, filter) wavelength_delta = filter.calculate_wavelength_delta() filtered_f_lambda = (filter_flux_density / wavelength_delta) zp_vega_f_lambda = filter.zp_vega_f_lambda return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda) def calculate_ab_magnitude(spectrum, filter): filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) / filter.calculate_wavelength_delta()) return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda) def list_filters(): """ List available filter sets along with their properties """ return load_filter_index() class BaseFilterCurve(object): """ Basic filter curve class Parameters ---------- wavelength: ~astropy.units.Quantity wavelength for filter curve transmission_lambda: numpy.ndarray transmission_lambda for filter curve interpolation_kind: str allowed interpolation kinds given in scipy.interpolate.interp1d """ @classmethod def load_filter(cls, filter_id=None, interpolation_kind='linear'): """ Parameters ---------- filter_id: str or None if None is provided will return a DataFrame of all filters interpolation_kind: str see scipy.interpolation.interp1d """ if filter_id is None: return list_filters() else: filter = load_transmission_data(filter_id) wavelength_unit = 'angstrom' wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit) return cls(wavelength, filter['Transmission'].values, interpolation_kind=interpolation_kind, filter_id=filter_id) def __init__(self, wavelength, transmission_lambda, interpolation_kind='linear', filter_id=None): if not hasattr(wavelength, 'unit'): raise ValueError('the wavelength needs to be a astropy quantity') self.wavelength = wavelength self.transmission_lambda = transmission_lambda self.interpolation_object = interpolate.interp1d(self.wavelength, self.transmission_lambda, kind=interpolation_kind, bounds_error=False, fill_value=0.0) self.filter_id = filter_id def __mul__(self, other): if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'): raise ValueError('requiring a specutils.Spectrum1D-like object that' 'has attributes "flux" and "wavelength"') #new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value, # self.wavelength.value) * self.wavelength.unit transmission = self.interpolate(other.wavelength) return Spectrum1D.from_array(other.wavelength, transmission * other.flux) def __rmul__(self, other): return self.__mul__(other) @utils.lazyproperty def lambda_pivot(self): """ Calculate the pivotal wavelength as defined in Bessell & Murphy 2012 .. 
math:: \\lambda_\\textrm{pivot} = \\sqrt{ \\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda}}}\\\\ <f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c} """ return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/ (np.trapz(self.transmission_lambda / self.wavelength, self.wavelength)))) @utils.lazyproperty def wavelength_start(self): return self.get_wavelength_start() @utils.lazyproperty def wavelength_end(self): return self.get_wavelength_end() @utils.lazyproperty def zp_ab_f_lambda(self): return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to( 'erg/s/cm^2/Angstrom', u.spectral()) @utils.lazyproperty def zp_ab_f_nu(self): return (3631 * u.Jy).to('erg/s/cm^2/Hz') @utils.lazyproperty def zp_vega_f_lambda(self): return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) / self.calculate_wavelength_delta()) def interpolate(self, wavelength): """ Interpolate the filter onto new wavelength grid Parameters ---------- wavelength: ~astropy.units.Quantity wavelength grid to interpolate on """ converted_wavelength = wavelength.to(self.wavelength.unit) return self.interpolation_object(converted_wavelength) def _calculuate_flux_density(self, wavelength, flux): return _calculcate_filter_flux_density(flux, self) def calculate_flux_density(self, spectrum): return calculate_filter_flux_density(spectrum, self) def calculate_f_lambda(self, spectrum): return (self.calculate_flux_density(spectrum) / self.calculate_wavelength_delta()) def calculate_wavelength_delta(self): """ Calculate the Integral :math:`\integral :return: """ return np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) def calculate_weighted_average_wavelength(self): """ Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}` Returns : ~astropy.units.Quantity """ return (np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) / self.calculate_wavelength_delta()) def calculate_vega_magnitude(self, spectrum): __doc__ = calculate_vega_magnitude.__doc__ return calculate_vega_magnitude(spectrum, self) def calculate_ab_magnitude(self, spectrum): __doc__ = calculate_ab_magnitude.__doc__ return calculate_ab_magnitude(spectrum, self) def convert_ab_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_ab_f_lambda def convert_vega_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_vega_f_lambda def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={}, format_filter_id=None): if scale_max is not None: if hasattr(scale_max, 'unit'): scale_max = scale_max.value transmission = (self.transmission_lambda * scale_max / self.transmission_lambda.max()) else: transmission = self.transmission_lambda ax.plot(self.wavelength, transmission, **plot_kwargs) ax.set_xlabel('Wavelength [{0}]'.format( self.wavelength.unit.to_string(format='latex'))) ax.set_ylabel('Transmission [1]') if make_label==True and self.filter_id is not None: if format_filter_id is not None: filter_id = format_filter_id(self.filter_id) else: filter_id = self.filter_id text_x = (self.lambda_pivot).value text_y = transmission.max()/2 ax.text(text_x, text_y, filter_id, horizontalalignment='center', verticalalignment='center', bbox=dict(facecolor='white', alpha=0.5)) def get_wavelength_start(self, threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(threshold)] def get_wavelength_end(self, 
threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)] class FilterCurve(BaseFilterCurve): def __repr__(self): if self.filter_id is None: filter_id = "{0:x}".format(self.__hash__()) else: filter_id = self.filter_id return "FilterCurve <{0}>".format(filter_id) class FilterSet(object): """ A set of filters Parameters ---------- filter_set: ~list a list of strings or a list of filters interpolation_kind: ~str scipy interpolaton kinds """ def __init__(self, filter_set, interpolation_kind='linear'): if hasattr(filter_set[0], 'wavelength'): self.filter_set = filter_set else: self.filter_set = [FilterCurve.load_filter(filter_id, interpolation_kind= interpolation_kind) for filter_id in filter_set] def __iter__(self): self.current_filter_idx = 0 return self def __next__(self): try: item = self.filter_set[self.current_filter_idx] except IndexError: raise StopIteration self.current_filter_idx += 1 return item next = __next__ def __getitem__(self, item): return self.filter_set.__getitem__(item) def __repr__(self): return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join( [item.filter_id for item in self.filter_set])) @property def lambda_pivot(self): return u.Quantity([item.lambda_pivot for item in self]) def calculate_f_lambda(self, spectrum): return u.Quantity( [item.calculate_f_lambda(spectrum) for item in self.filter_set]) def calculate_ab_magnitudes(self, spectrum): mags = [item.calculate_ab_magnitude(spectrum) for item in self.filter_set] return mags def calculate_vega_magnitudes(self, spectrum): mags = [item.calculate_vega_magnitude(spectrum) for item in self.filter_set] return mags def convert_ab_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_ab_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return 
np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_vega_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def plot_spectrum(self, spectrum, ax, make_labels=True, spectrum_plot_kwargs={}, filter_plot_kwargs={}, filter_color_list=None, format_filter_id=None): """ plot a spectrum with the given filters spectrum: ax: make_labels: :return: """ ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs) for i, filter in enumerate(self.filter_set): filter_scale = filter.calculate_f_lambda(spectrum) if filter_color_list is not None: filter_plot_kwargs['color'] = filter_color_list[i] filter.plot(ax, scale_max=filter_scale, make_label=make_labels, plot_kwargs=filter_plot_kwargs, format_filter_id=format_filter_id) class MagnitudeSet(FilterSet): def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None, interpolation_kind='linear'): super(MagnitudeSet, self).__init__(filter_set, interpolation_kind= interpolation_kind) self.magnitudes = np.array(magnitudes) self.magnitude_uncertainties = np.array(magnitude_uncertainties) def __repr__(self): mag_str = '{0} {1:.4f} +/- {2:.4f}' mag_data = [] for i, filter in enumerate(self.filter_set): unc = (np.nan if self.magnitude_uncertainties is None else self.magnitude_uncertainties[i]) mag_data.append(mag_str.format(filter.filter_id, self.magnitudes[i], unc)) return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join(mag_data))
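# --- Editor's usage sketch (hedged; not part of the original module) ---
# The filter identifiers below are assumptions for illustration only; valid
# ids can be looked up with list_filters().
if __name__ == '__main__':
    print(list_filters())                          # browse available filters
    # Load one curve and inspect its pivot wavelength (the id is assumed):
    v_band = FilterCurve.load_filter('Generic/Bessell.V')
    print(v_band.lambda_pivot)
    # Grouping several curves and computing magnitudes for a spectrum `spec`
    # (construction of `spec` omitted here) would look like:
    # filters = FilterSet(['Generic/Bessell.B', 'Generic/Bessell.V'])
    # print(filters.calculate_ab_magnitudes(spec))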
bsd-3-clause
rubennj/pvlib-python
docs/sphinx/sphinxext/numpydoc/docscrape_sphinx.py
41
9437
from __future__ import division, absolute_import, print_function import sys, re, inspect, textwrap, pydoc import sphinx import collections from .docscrape import NumpyDocString, FunctionDoc, ClassDoc if sys.version_info[0] >= 3: sixu = lambda s: s else: sixu = lambda s: unicode(s, 'unicode_escape') class SphinxDocString(NumpyDocString): def __init__(self, docstring, config={}): NumpyDocString.__init__(self, docstring, config=config) self.load_config(config) def load_config(self, config): self.use_plots = config.get('use_plots', False) self.class_members_toctree = config.get('class_members_toctree', True) # string conversion routines def _str_header(self, name, symbol='`'): return ['.. rubric:: ' + name, ''] def _str_field_list(self, name): return [':' + name + ':'] def _str_indent(self, doc, indent=4): out = [] for line in doc: out += [' '*indent + line] return out def _str_signature(self): return [''] if self['Signature']: return ['``%s``' % self['Signature']] + [''] else: return [''] def _str_summary(self): return self['Summary'] + [''] def _str_extended_summary(self): return self['Extended Summary'] + [''] def _str_returns(self): out = [] if self['Returns']: out += self._str_field_list('Returns') out += [''] for param, param_type, desc in self['Returns']: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent([param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out def _str_param_list(self, name): out = [] if self[name]: out += self._str_field_list(name) out += [''] for param, param_type, desc in self[name]: if param_type: out += self._str_indent(['**%s** : %s' % (param.strip(), param_type)]) else: out += self._str_indent(['**%s**' % param.strip()]) if desc: out += [''] out += self._str_indent(desc, 8) out += [''] return out @property def _obj(self): if hasattr(self, '_cls'): return self._cls elif hasattr(self, '_f'): return self._f return None def _str_member_list(self, name): """ Generate a member listing, autosummary:: table where possible, and a table where not. """ out = [] if self[name]: out += ['.. rubric:: %s' % name, ''] prefix = getattr(self, '_name', '') if prefix: prefix = '~%s.' % prefix autosum = [] others = [] for param, param_type, desc in self[name]: param = param.strip() # Check if the referenced member can have a docstring or not param_obj = getattr(self._obj, param, None) if not (callable(param_obj) or isinstance(param_obj, property) or inspect.isgetsetdescriptor(param_obj)): param_obj = None if param_obj and (pydoc.getdoc(param_obj) or not desc): # Referenced object has a docstring autosum += [" %s%s" % (prefix, param)] else: others.append((param, param_type, desc)) if autosum: out += ['.. 
autosummary::'] if self.class_members_toctree: out += [' :toctree:'] out += [''] + autosum if others: maxlen_0 = max(3, max([len(x[0]) for x in others])) hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 fmt = sixu('%%%ds %%s ') % (maxlen_0,) out += ['', hdr] for param, param_type, desc in others: desc = sixu(" ").join(x.strip() for x in desc).strip() if param_type: desc = "(%s) %s" % (param_type, desc) out += [fmt % (param.strip(), desc)] out += [hdr] out += [''] return out def _str_section(self, name): out = [] if self[name]: out += self._str_header(name) out += [''] content = textwrap.dedent("\n".join(self[name])).split("\n") out += content out += [''] return out def _str_see_also(self, func_role): out = [] if self['See Also']: see_also = super(SphinxDocString, self)._str_see_also(func_role) out = ['.. seealso::', ''] out += self._str_indent(see_also[2:]) return out def _str_warnings(self): out = [] if self['Warnings']: out = ['.. warning::', ''] out += self._str_indent(self['Warnings']) return out def _str_index(self): idx = self['index'] out = [] if len(idx) == 0: return out out += ['.. index:: %s' % idx.get('default','')] for section, references in idx.items(): if section == 'default': continue elif section == 'refguide': out += [' single: %s' % (', '.join(references))] else: out += [' %s: %s' % (section, ','.join(references))] return out def _str_references(self): out = [] if self['References']: out += self._str_header('References') if isinstance(self['References'], str): self['References'] = [self['References']] out.extend(self['References']) out += [''] # Latex collects all references to a separate bibliography, # so we need to insert links to it if sphinx.__version__ >= "0.6": out += ['.. only:: latex',''] else: out += ['.. latexonly::',''] items = [] for line in self['References']: m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) if m: items.append(m.group(1)) out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] return out def _str_examples(self): examples_str = "\n".join(self['Examples']) if (self.use_plots and 'import matplotlib' in examples_str and 'plot::' not in examples_str): out = [] out += self._str_header('Examples') out += ['.. 
plot::', ''] out += self._str_indent(self['Examples']) out += [''] return out else: return self._str_section('Examples') def __str__(self, indent=0, func_role="obj"): out = [] out += self._str_signature() out += self._str_index() + [''] out += self._str_summary() out += self._str_extended_summary() out += self._str_param_list('Parameters') out += self._str_returns() for param_list in ('Other Parameters', 'Raises', 'Warns'): out += self._str_param_list(param_list) out += self._str_warnings() out += self._str_see_also(func_role) out += self._str_section('Notes') out += self._str_references() out += self._str_examples() for param_list in ('Attributes', 'Methods'): out += self._str_member_list(param_list) out = self._str_indent(out,indent) return '\n'.join(out) class SphinxFunctionDoc(SphinxDocString, FunctionDoc): def __init__(self, obj, doc=None, config={}): self.load_config(config) FunctionDoc.__init__(self, obj, doc=doc, config=config) class SphinxClassDoc(SphinxDocString, ClassDoc): def __init__(self, obj, doc=None, func_doc=None, config={}): self.load_config(config) ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) class SphinxObjDoc(SphinxDocString): def __init__(self, obj, doc=None, config={}): self._f = obj self.load_config(config) SphinxDocString.__init__(self, doc, config=config) def get_doc_object(obj, what=None, doc=None, config={}): if what is None: if inspect.isclass(obj): what = 'class' elif inspect.ismodule(obj): what = 'module' elif isinstance(obj, collections.Callable): what = 'function' else: what = 'object' if what == 'class': return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, config=config) elif what in ('function', 'method'): return SphinxFunctionDoc(obj, doc=doc, config=config) else: if doc is None: doc = pydoc.getdoc(obj) return SphinxObjDoc(obj, doc, config=config)
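# --- Editor's usage sketch (hedged; not part of the original extension) ---
# Renders a small NumPy-style docstring into the Sphinx-flavoured reST that
# SphinxDocString / get_doc_object produce.
if __name__ == '__main__':
    def example(x):
        """
        Square a number.

        Parameters
        ----------
        x : float
            Value to square.

        Returns
        -------
        float
            The squared value.
        """
        return x * x

    print(get_doc_object(example, config={'use_plots': False}))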
bsd-3-clause
bradleyhd/netsim
nodes_vs_routing_speed.py
1
2878
import matplotlib.pyplot as plt import numpy as np import math from scipy.optimize import curve_fit def linear(x, a, b): return a * x + b def quadratic(x, a, b, c): return a * x**2 + b * x + c def exponential(x, a, b, c): return a * x**b + c fig = plt.figure(num=None, figsize=(12, 8), dpi=300, facecolor='k', edgecolor='k') xs = [[1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292], [1014, 4383, 11821, 37698, 108043, 286563, 672292]] ys = [[0.00013309850001519408, 0.00059208550001699223, 0.002604027000003839, 0.004665461000030291, 0.014662985999962075, 0.023410306499954459, 0.041176939000251878], [0.00014861549998101964, 0.00055641999999522795, 0.002577900000005684, 0.0054275369999459144, 0.021226498000032734, 0.029786237500047719, 0.059782716000881919], [0.00012334000000180367, 0.00043368899999052246, 0.0020054734999632728, 0.005848614000001362, 0.014609930999995413, 0.019599954500336025, 0.028973604500606598], [0.00012613299999486571, 0.00044437049999146438, 0.0021501399999692694, 0.0055929929999933847, 0.019908546500118973, 0.039582631500252319, 0.054390303499531001]] ys = np.array(ys) * 1000 def graph(i, label, color, marker, l_marker): y = np.array(ys[i]) x = np.array(xs[i]) xl = np.linspace(np.min(x), np.max(x), 500) popt, pcov = curve_fit(exponential, x, y) plt.scatter(x, y, label=label, color=color, marker=marker) plt.plot(xl, exponential(xl, *popt), color=color, linestyle=l_marker) blue = '#5738FF' purple = '#E747E7' orange = '#E7A725' green = '#A1FF47' red = '#FF1E43' gray = '#333333' white = 'w' graph(0, 'EDS5 - original graph', red, 'o', '--') graph(1, 'N5 - original graph', purple, 's', '--') graph(2, 'EDS5 - decision graph', blue, '^', '--') graph(3, 'N5 - decision graph', white, 'D', '--') ax = fig.gca() plt.title('Effects of Node Ordering on Routing Speed', color=white) plt.xlabel('Effective $\\vert V\/\\vert$') plt.ylabel('Routing Time (ms)') plt.axes().set_axis_bgcolor('black') ax.xaxis.label.set_color(white) ax.yaxis.label.set_color(white) ax.tick_params(axis='x', colors=white) ax.tick_params(axis='y', colors=white) ax.spines['bottom'].set_color(white) ax.spines['top'].set_color(white) ax.spines['left'].set_color(white) ax.spines['right'].set_color(white) legend = plt.legend(loc=0, numpoints=1, framealpha=0.0) legend.get_frame().set_facecolor('k') max_x = np.max(np.array(xs)) max_y = np.max(np.array(ys)) min_x = np.min(np.array(xs)) min_y = 0 - (max_y * 0.01) min_x = 0 - (max_x * 0.01) max_x *= 1.01 max_y *= 1.01 plt.axes().set_xlim([min_x, max_x]) plt.axes().set_ylim([min_y, max_y]) for text in legend.get_texts(): text.set_color(white) # plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) plt.savefig('nodes_vs_routing_speed.png', transparent=True) #plt.show()
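# --- Editor's follow-up sketch (hedged; not part of the original script) ---
# Reports the fitted a*x**b + c parameters for each series, which the plot
# above only shows graphically.  Labels mirror the graph() calls.
labels = ['EDS5 - original graph', 'N5 - original graph',
          'EDS5 - decision graph', 'N5 - decision graph']
for i, label in enumerate(labels):
    popt, _ = curve_fit(exponential, np.array(xs[i]), np.array(ys[i]))
    print('%s: a=%.3g b=%.3g c=%.3g' % ((label,) + tuple(popt)))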
gpl-3.0
akloster/bokeh
bokeh/properties.py
20
42601
""" Properties are objects that can be assigned as class level attributes on Bokeh models, to provide automatic serialization and validation. For example, the following defines a model that has integer, string, and list[float] properties:: class Model(HasProps): foo = Int bar = String baz = List(Float) The properties of this class can be initialized by specifying keyword arguments to the initializer:: m = Model(foo=10, bar="a str", baz=[1,2,3,4]) But also by setting the attributes on an instance:: m.foo = 20 Attempts to set a property to a value of the wrong type will result in a ``ValueError`` exception:: >>> m.foo = 2.3 Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/bryan/work/bokeh/bokeh/properties.py", line 585, in __setattr__ super(HasProps, self).__setattr__(name, value) File "/Users/bryan/work/bokeh/bokeh/properties.py", line 159, in __set__ raise e File "/Users/bryan/work/bokeh/bokeh/properties.py", line 152, in __set__ self.validate(value) File "/Users/bryan/work/bokeh/bokeh/properties.py", line 707, in validate (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__)) ValueError: expected a value of type int8, int16, int32, int64 or int, got 2.3 of type float Additionally, properties know how to serialize themselves, to be understood by BokehJS. """ from __future__ import absolute_import, print_function import re import types import difflib import datetime import dateutil.parser import collections from importlib import import_module from copy import copy from warnings import warn import inspect import logging logger = logging.getLogger(__name__) from six import integer_types, string_types, add_metaclass, iteritems import numpy as np from . import enums from .util.string import nice_join def field(name): ''' Convenience function do explicitly mark a field specification for a Bokeh model property. Args: name (str) : name of a data source field to reference for a property. Returns: dict : `{"field": name}` Note: This function is included for completeness. String values for property specifications are by default interpreted as field names. ''' return dict(field=name) def value(val): ''' Convenience function do explicitly mark a value specification for a Bokeh model property. Args: val (any) : a fixed value to specify for a property. Returns: dict : `{"value": name}` Note: String values for property specifications are by default interpreted as field names. This function is especially useful when you want to specify a fixed value with text properties. Example: .. code-block:: python # The following will take text values to render from a data source # column "text_column", but use a fixed value "12pt" for font size p.text("x", "y", text="text_column", text_font_size=value("12pt"), source=source) ''' return dict(value=val) bokeh_integer_types = (np.int8, np.int16, np.int32, np.int64) + integer_types # used to indicate properties that are not set (vs null, None, etc) class _NotSet(object): pass class DeserializationError(Exception): pass class Property(object): """ Base class for all type properties. """ def __init__(self, default=None, help=None): """ This is how the descriptor is created in the class declaration """ if isinstance(default, types.FunctionType): # aka. 
lazy value self.validate(default()) else: self.validate(default) self._default = default self.__doc__ = help self.alternatives = [] # This gets set by the class decorator at class creation time self.name = "unnamed" def __str__(self): return self.__class__.__name__ @property def _name(self): return "_" + self.name @property def default(self): if not isinstance(self._default, types.FunctionType): return copy(self._default) else: value = self._default() self.validate(value) return value @classmethod def autocreate(cls, name=None): """ Called by the metaclass to create a new instance of this descriptor if the user just assigned it to a property without trailing parentheses. """ return cls() def matches(self, new, old): # XXX: originally this code warned about not being able to compare values, but that # doesn't make sense, because most comparisons involving numpy arrays will fail with # ValueError exception, thus warning about inevitable. try: if new is None or old is None: return new is old # XXX: silence FutureWarning from NumPy else: return new == old except (KeyboardInterrupt, SystemExit): raise except Exception as e: logger.debug("could not compare %s and %s for property %s (Reason: %s)", new, old, self.name, e) return False def from_json(self, json, models=None): return json def transform(self, value): return value def validate(self, value): pass def is_valid(self, value): try: self.validate(value) except ValueError: return False else: return True def _get(self, obj): if not hasattr(obj, self._name): setattr(obj, self._name, self.default) return getattr(obj, self._name) def __get__(self, obj, owner=None): if obj is not None: return self._get(obj) elif owner is not None: return self else: raise ValueError("both 'obj' and 'owner' are None, don't know what to do") def __set__(self, obj, value): try: self.validate(value) except ValueError as e: for tp, converter in self.alternatives: if tp.is_valid(value): value = converter(value) break else: raise e else: value = self.transform(value) old = self.__get__(obj) obj._changed_vars.add(self.name) if self._name in obj.__dict__ and self.matches(value, old): return setattr(obj, self._name, value) obj._dirty = True if hasattr(obj, '_trigger'): if hasattr(obj, '_block_callbacks') and obj._block_callbacks: obj._callback_queue.append((self.name, old, value)) else: obj._trigger(self.name, old, value) def __delete__(self, obj): if hasattr(obj, self._name): delattr(obj, self._name) @property def has_ref(self): return False def accepts(self, tp, converter): tp = ParameterizedProperty._validate_type_param(tp) self.alternatives.append((tp, converter)) return self def __or__(self, other): return Either(self, other) class Include(object): """ Include other properties from mixin Models, with a given prefix. 
""" def __init__(self, delegate, help="", use_prefix=True): if not (isinstance(delegate, type) and issubclass(delegate, HasProps)): raise ValueError("expected a subclass of HasProps, got %r" % delegate) self.delegate = delegate self.help = help self.use_prefix = use_prefix class MetaHasProps(type): def __new__(cls, class_name, bases, class_dict): names = set() names_with_refs = set() container_names = set() # First pre-process to handle all the Includes includes = {} removes = set() for name, prop in class_dict.items(): if not isinstance(prop, Include): continue delegate = prop.delegate if prop.use_prefix: prefix = re.sub("_props$", "", name) + "_" else: prefix = "" for subpropname in delegate.class_properties(withbases=False): fullpropname = prefix + subpropname subprop = delegate.lookup(subpropname) if isinstance(subprop, Property): # If it's an actual instance, then we need to make a copy # so two properties don't write to the same hidden variable # inside the instance. subprop = copy(subprop) if "%s" in prop.help: doc = prop.help % subpropname.replace('_', ' ') else: doc = prop.help try: includes[fullpropname] = subprop(help=doc) except TypeError: includes[fullpropname] = subprop subprop.__doc__ = doc # Remove the name of the Include attribute itself removes.add(name) # Update the class dictionary, taking care not to overwrite values # from the delegates that the subclass may have explicitly defined for key, val in includes.items(): if key not in class_dict: class_dict[key] = val for tmp in removes: del class_dict[tmp] dataspecs = {} units_to_add = {} for name, prop in class_dict.items(): if isinstance(prop, Property): prop.name = name if prop.has_ref: names_with_refs.add(name) elif isinstance(prop, ContainerProperty): container_names.add(name) names.add(name) if isinstance(prop, DataSpec): dataspecs[name] = prop if hasattr(prop, '_units_type'): units_to_add[name+"_units"] = prop._units_type elif isinstance(prop, type) and issubclass(prop, Property): # Support the user adding a property without using parens, # i.e. using just the Property subclass instead of an # instance of the subclass newprop = prop.autocreate(name=name) class_dict[name] = newprop newprop.name = name names.add(name) # Process dataspecs if issubclass(prop, DataSpec): dataspecs[name] = newprop for name, prop in units_to_add.items(): prop.name = name names.add(name) class_dict[name] = prop class_dict["__properties__"] = names class_dict["__properties_with_refs__"] = names_with_refs class_dict["__container_props__"] = container_names if dataspecs: class_dict["_dataspecs"] = dataspecs return type.__new__(cls, class_name, bases, class_dict) def accumulate_from_subclasses(cls, propname): s = set() for c in inspect.getmro(cls): if issubclass(c, HasProps): s.update(getattr(c, propname)) return s @add_metaclass(MetaHasProps) class HasProps(object): def __init__(self, **properties): super(HasProps, self).__init__() self._changed_vars = set() for name, value in properties.items(): setattr(self, name, value) def __setattr__(self, name, value): props = sorted(self.properties()) if name.startswith("_") or name in props: super(HasProps, self).__setattr__(name, value) else: matches, text = difflib.get_close_matches(name.lower(), props), "similar" if not matches: matches, text = props, "possible" raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" % (name, self.__class__.__name__, text, nice_join(matches))) def clone(self): """ Returns a duplicate of this object with all its properties set appropriately. 
Values which are containers are shallow-copied. """ return self.__class__(**self.changed_properties_with_values()) @classmethod def lookup(cls, name): return getattr(cls, name) @classmethod def properties_with_refs(cls): """ Returns a set of the names of this object's properties that have references. We traverse the class hierarchy and pull together the full list of properties. """ if not hasattr(cls, "__cached_allprops_with_refs"): s = accumulate_from_subclasses(cls, "__properties_with_refs__") cls.__cached_allprops_with_refs = s return cls.__cached_allprops_with_refs @classmethod def properties_containers(cls): """ Returns a list of properties that are containers """ if not hasattr(cls, "__cached_allprops_containers"): s = accumulate_from_subclasses(cls, "__container_props__") cls.__cached_allprops_containers = s return cls.__cached_allprops_containers @classmethod def properties(cls): """ Returns a set of the names of this object's properties. We traverse the class hierarchy and pull together the full list of properties. """ if not hasattr(cls, "__cached_allprops"): s = cls.class_properties() cls.__cached_allprops = s return cls.__cached_allprops @classmethod def dataspecs(cls): """ Returns a set of the names of this object's dataspecs (and dataspec subclasses). Traverses the class hierarchy. """ if not hasattr(cls, "__cached_dataspecs"): dataspecs = set() for c in reversed(inspect.getmro(cls)): if hasattr(c, "_dataspecs"): dataspecs.update(c._dataspecs.keys()) cls.__cached_dataspecs = dataspecs return cls.__cached_dataspecs @classmethod def dataspecs_with_refs(cls): dataspecs = {} for c in reversed(inspect.getmro(cls)): if hasattr(c, "_dataspecs"): dataspecs.update(c._dataspecs) return dataspecs def changed_vars(self): """ Returns which variables changed since the creation of the object, or the last called to reset_changed_vars(). """ return set.union(self._changed_vars, self.properties_with_refs(), self.properties_containers()) def reset_changed_vars(self): self._changed_vars = set() def properties_with_values(self): return dict([ (attr, getattr(self, attr)) for attr in self.properties() ]) def changed_properties(self): return self.changed_vars() def changed_properties_with_values(self): return dict([ (attr, getattr(self, attr)) for attr in self.changed_properties() ]) @classmethod def class_properties(cls, withbases=True): if withbases: return accumulate_from_subclasses(cls, "__properties__") else: return set(cls.__properties__) def set(self, **kwargs): """ Sets a number of properties at once """ for kw in kwargs: setattr(self, kw, kwargs[kw]) def pprint_props(self, indent=0): """ Prints the properties of this object, nicely formatted """ for key, value in self.properties_with_values().items(): print("%s%s: %r" % (" "*indent, key, value)) class PrimitiveProperty(Property): """ A base class for simple property types. Subclasses should define a class attribute ``_underlying_type`` that is a tuple of acceptable type values for the property. 
""" _underlying_type = None def validate(self, value): super(PrimitiveProperty, self).validate(value) if not (value is None or isinstance(value, self._underlying_type)): raise ValueError("expected a value of type %s, got %s of type %s" % (nice_join([ cls.__name__ for cls in self._underlying_type ]), value, type(value).__name__)) def from_json(self, json, models=None): if json is None or isinstance(json, self._underlying_type): return json else: expected = nice_join([ cls.__name__ for cls in self._underlying_type ]) raise DeserializationError("%s expected %s, got %s" % (self, expected, json)) class Bool(PrimitiveProperty): """ Boolean type property. """ _underlying_type = (bool,) class Int(PrimitiveProperty): """ Signed integer type property. """ _underlying_type = bokeh_integer_types class Float(PrimitiveProperty): """ Floating point type property. """ _underlying_type = (float, ) + bokeh_integer_types class Complex(PrimitiveProperty): """ Complex floating point type property. """ _underlying_type = (complex, float) + bokeh_integer_types class String(PrimitiveProperty): """ String type property. """ _underlying_type = string_types class Regex(String): """ Regex type property validates that text values match the given regular expression. """ def __init__(self, regex, default=None, help=None): self.regex = re.compile(regex) super(Regex, self).__init__(default=default, help=help) def validate(self, value): super(Regex, self).validate(value) if not (value is None or self.regex.match(value) is not None): raise ValueError("expected a string matching %r pattern, got %r" % (self.regex.pattern, value)) def __str__(self): return "%s(%r)" % (self.__class__.__name__, self.regex.pattern) class JSON(String): """ JSON type property validates that text values are valid JSON. .. note:: The string is transmitted and received by BokehJS as a *string* containing JSON content. i.e., you must use ``JSON.parse`` to unpack the value into a JavaScript hash. """ def validate(self, value): super(JSON, self).validate(value) if value is None: return try: import json json.loads(value) except ValueError: raise ValueError("expected JSON text, got %r" % value) class ParameterizedProperty(Property): """ Base class for Properties that have type parameters, e.g. ``List(String)``. """ @staticmethod def _validate_type_param(type_param): if isinstance(type_param, type): if issubclass(type_param, Property): return type_param() else: type_param = type_param.__name__ elif isinstance(type_param, Property): return type_param raise ValueError("expected a property as type parameter, got %s" % type_param) @property def type_params(self): raise NotImplementedError("abstract method") @property def has_ref(self): return any(type_param.has_ref for type_param in self.type_params) class ContainerProperty(ParameterizedProperty): """ Base class for Container-like type properties. """ pass class Seq(ContainerProperty): """ Sequence (list, tuple) type property. 
""" def _is_seq(self, value): return isinstance(value, collections.Container) and not isinstance(value, collections.Mapping) def _new_instance(self, value): return value def __init__(self, item_type, default=None, help=None): self.item_type = self._validate_type_param(item_type) super(Seq, self).__init__(default=default, help=help) @property def type_params(self): return [self.item_type] def validate(self, value): super(Seq, self).validate(value) if value is not None: if not (self._is_seq(value) and all(self.item_type.is_valid(item) for item in value)): raise ValueError("expected an element of %s, got %r" % (self, value)) def __str__(self): return "%s(%s)" % (self.__class__.__name__, self.item_type) def from_json(self, json, models=None): if json is None: return None elif isinstance(json, list): return self._new_instance([ self.item_type.from_json(item, models) for item in json ]) else: raise DeserializationError("%s expected a list or None, got %s" % (self, json)) class List(Seq): """ Python list type property. """ def __init__(self, item_type, default=[], help=None): # todo: refactor to not use mutable objects as default values. # Left in place for now because we want to allow None to express # opional values. Also in Dict. super(List, self).__init__(item_type, default=default, help=help) def _is_seq(self, value): return isinstance(value, list) class Array(Seq): """ NumPy array type property. """ def _is_seq(self, value): import numpy as np return isinstance(value, np.ndarray) def _new_instance(self, value): return np.array(value) class Dict(ContainerProperty): """ Python dict type property. If a default value is passed in, then a shallow copy of it will be used for each new use of this property. """ def __init__(self, keys_type, values_type, default={}, help=None): self.keys_type = self._validate_type_param(keys_type) self.values_type = self._validate_type_param(values_type) super(Dict, self).__init__(default=default, help=help) @property def type_params(self): return [self.keys_type, self.values_type] def validate(self, value): super(Dict, self).validate(value) if value is not None: if not (isinstance(value, dict) and \ all(self.keys_type.is_valid(key) and self.values_type.is_valid(val) for key, val in iteritems(value))): raise ValueError("expected an element of %s, got %r" % (self, value)) def __str__(self): return "%s(%s, %s)" % (self.__class__.__name__, self.keys_type, self.values_type) def from_json(self, json, models=None): if json is None: return None elif isinstance(json, dict): return { self.keys_type.from_json(key, models): self.values_type.from_json(value, models) for key, value in iteritems(json) } else: raise DeserializationError("%s expected a dict or None, got %s" % (self, json)) class Tuple(ContainerProperty): """ Tuple type property. 
""" def __init__(self, tp1, tp2, *type_params, **kwargs): self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params)) super(Tuple, self).__init__(default=kwargs.get("default"), help=kwargs.get("help")) @property def type_params(self): return self._type_params def validate(self, value): super(Tuple, self).validate(value) if value is not None: if not (isinstance(value, (tuple, list)) and len(self.type_params) == len(value) and \ all(type_param.is_valid(item) for type_param, item in zip(self.type_params, value))): raise ValueError("expected an element of %s, got %r" % (self, value)) def __str__(self): return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params))) def from_json(self, json, models=None): if json is None: return None elif isinstance(json, list): return tuple(type_param.from_json(item, models) for type_param, item in zip(self.type_params, json)) else: raise DeserializationError("%s expected a list or None, got %s" % (self, json)) class Instance(Property): """ Instance type property, for references to other Models in the object graph. """ def __init__(self, instance_type, default=None, help=None): if not isinstance(instance_type, (type,) + string_types): raise ValueError("expected a type or string, got %s" % instance_type) if isinstance(instance_type, type) and not issubclass(instance_type, HasProps): raise ValueError("expected a subclass of HasProps, got %s" % instance_type) self._instance_type = instance_type super(Instance, self).__init__(default=default, help=help) @property def instance_type(self): if isinstance(self._instance_type, str): module, name = self._instance_type.rsplit(".", 1) self._instance_type = getattr(import_module(module, "bokeh"), name) return self._instance_type @property def has_ref(self): return True def validate(self, value): super(Instance, self).validate(value) if value is not None: if not isinstance(value, self.instance_type): raise ValueError("expected an instance of type %s, got %s of type %s" % (self.instance_type.__name__, value, type(value).__name__)) def __str__(self): return "%s(%s)" % (self.__class__.__name__, self.instance_type.__name__) def from_json(self, json, models=None): if json is None: return None elif isinstance(json, dict): from .plot_object import PlotObject if issubclass(self.instance_type, PlotObject): if models is None: raise DeserializationError("%s can't deserialize without models" % self) else: model = models.get(json["id"]) if model is not None: return model else: raise DeserializationError("%s failed to deserilize reference to %s" % (self, json)) else: attrs = {} for name, value in iteritems(json): prop = self.instance_type.lookup(name) attrs[name] = prop.from_json(value, models) # XXX: this doesn't work when Instance(Superclass) := Subclass() # Serialization dict must carry type information to resolve this. return self.instance_type(**attrs) else: raise DeserializationError("%s expected a dict or None, got %s" % (self, json)) class This(Property): """ A reference to an instance of the class being defined. """ pass # Fake types, ABCs class Any(Property): """ Any type property accepts any values. """ pass class Function(Property): """ Function type property. """ pass class Event(Property): """ Event type property. """ pass class Interval(ParameterizedProperty): ''' Range type property ensures values are contained inside a given interval. 
''' def __init__(self, interval_type, start, end, default=None, help=None): self.interval_type = self._validate_type_param(interval_type) self.interval_type.validate(start) self.interval_type.validate(end) self.start = start self.end = end super(Interval, self).__init__(default=default, help=help) @property def type_params(self): return [self.interval_type] def validate(self, value): super(Interval, self).validate(value) if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end): raise ValueError("expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value)) def __str__(self): return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end) class Byte(Interval): ''' Byte type property. ''' def __init__(self, default=0, help=None): super(Byte, self).__init__(Int, 0, 255, default=default, help=help) class Either(ParameterizedProperty): """ Takes a list of valid properties and validates against them in succession. """ def __init__(self, tp1, tp2, *type_params, **kwargs): self._type_params = list(map(self._validate_type_param, (tp1, tp2) + type_params)) default = kwargs.get("default", self._type_params[0].default) help = kwargs.get("help") super(Either, self).__init__(default=default, help=help) @property def type_params(self): return self._type_params def validate(self, value): super(Either, self).validate(value) if not (value is None or any(param.is_valid(value) for param in self.type_params)): raise ValueError("expected an element of either %s, got %r" % (nice_join(self.type_params), value)) def transform(self, value): for param in self.type_params: try: return param.transform(value) except ValueError: pass raise ValueError("Could not transform %r" % value) def from_json(self, json, models=None): for tp in self.type_params: try: return tp.from_json(json, models) except DeserializationError: pass else: raise DeserializationError("%s couldn't deserialize %s" % (self, json)) def __str__(self): return "%s(%s)" % (self.__class__.__name__, ", ".join(map(str, self.type_params))) def __or__(self, other): return self.__class__(*(self.type_params + [other]), default=self._default, help=self.help) class Enum(Property): """ An Enum with a list of allowed values. The first value in the list is the default value, unless a default is provided with the "default" keyword argument. """ def __init__(self, enum, *values, **kwargs): if not (not values and isinstance(enum, enums.Enumeration)): enum = enums.enumeration(enum, *values) self.allowed_values = enum._values default = kwargs.get("default", enum._default) help = kwargs.get("help") super(Enum, self).__init__(default=default, help=help) def validate(self, value): super(Enum, self).validate(value) if not (value is None or value in self.allowed_values): raise ValueError("invalid value for %s: %r; allowed values are %s" % (self.name, value, nice_join(self.allowed_values))) def __str__(self): return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, self.allowed_values))) class Auto(Enum): def __init__(self): super(Auto, self).__init__("auto") def __str__(self): return self.__class__.__name__ # Properties useful for defining visual attributes class Color(Either): """ Accepts color definition in a variety of ways, and produces an appropriate serialization of its value for whatever backend. 
For colors, because we support named colors and hex values prefaced with a "#", when we are handed a string value, there is a little interpretation: if the value is one of the 147 SVG named colors or it starts with a "#", then it is interpreted as a value. If a 3-tuple is provided, then it is treated as an RGB (0..255). If a 4-tuple is provided, then it is treated as an RGBa (0..255), with alpha as a float between 0 and 1. (This follows the HTML5 Canvas API.) """ def __init__(self, default=None, help=None): types = (Enum(enums.NamedColor), Regex("^#[0-9a-fA-F]{6}$"), Tuple(Byte, Byte, Byte), Tuple(Byte, Byte, Byte, Percent)) super(Color, self).__init__(*types, default=default, help=help) def __str__(self): return self.__class__.__name__ class Align(Property): pass class DashPattern(Either): """ Dash type property. Express patterns that describe line dashes. ``DashPattern`` values can be specified in a variety of ways: * An enum: "solid", "dashed", "dotted", "dotdash", "dashdot" * a tuple or list of integers in the `HTML5 Canvas dash specification style`_. Note that if the list of integers has an odd number of elements, then it is duplicated, and that duplicated list becomes the new dash list. To indicate that dashing is turned off (solid lines), specify the empty list []. .. _HTML5 Canvas dash specification style: http://www.w3.org/html/wg/drafts/2dcontext/html5_canvas/#dash-list """ _dash_patterns = { "solid": [], "dashed": [6], "dotted": [2,4], "dotdash": [2,4,6,4], "dashdot": [6,4,2,4], } def __init__(self, default=[], help=None): types = Enum(enums.DashPattern), Regex(r"^(\d+(\s+\d+)*)?$"), Seq(Int) super(DashPattern, self).__init__(*types, default=default, help=help) def transform(self, value): value = super(DashPattern, self).transform(value) if isinstance(value, string_types): try: return self._dash_patterns[value] except KeyError: return [int(x) for x in value.split()] else: return value def __str__(self): return self.__class__.__name__ class Size(Float): """ Size type property. .. note:: ``Size`` is equivalent to an unsigned int. """ def validate(self, value): super(Size, self).validate(value) if not (value is None or 0.0 <= value): raise ValueError("expected a non-negative number, got %r" % value) class Percent(Float): """ Percentage type property. Percents are useful for specifying alphas and coverage and extents; more semantically meaningful than Float(0..1). """ def validate(self, value): super(Percent, self).validate(value) if not (value is None or 0.0 <= value <= 1.0): raise ValueError("expected a value in range [0, 1], got %r" % value) class Angle(Float): """ Angle type property. """ pass class Date(Property): """ Date (not datetime) type property. """ def __init__(self, default=datetime.date.today(), help=None): super(Date, self).__init__(default=default, help=help) def validate(self, value): super(Date, self).validate(value) if not (value is None or isinstance(value, (datetime.date,) + string_types + (float,) + bokeh_integer_types)): raise ValueError("expected a date, string or timestamp, got %r" % value) def transform(self, value): value = super(Date, self).transform(value) if isinstance(value, (float,) + bokeh_integer_types): try: value = datetime.date.fromtimestamp(value) except ValueError: value = datetime.date.fromtimestamp(value/1000) elif isinstance(value, string_types): value = dateutil.parser.parse(value).date() return value class Datetime(Property): """ Datetime type property. 
""" def __init__(self, default=datetime.date.today(), help=None): super(Datetime, self).__init__(default=default, help=help) def validate(self, value): super(Datetime, self).validate(value) if (isinstance(value, (datetime.datetime, datetime.date, np.datetime64))): return try: import pandas if isinstance(value, (pandas.Timestamp)): return except ImportError: pass raise ValueError("Expected a datetime instance, got %r" % value) def transform(self, value): value = super(Datetime, self).transform(value) return value # Handled by serialization in protocol.py for now class RelativeDelta(Dict): """ RelativeDelta type property for time deltas. """ def __init__(self, default={}, help=None): keys = Enum("years", "months", "days", "hours", "minutes", "seconds", "microseconds") values = Int super(RelativeDelta, self).__init__(keys, values, default=default, help=help) def __str__(self): return self.__class__.__name__ class DataSpec(Either): def __init__(self, typ, default, help=None): super(DataSpec, self).__init__(String, Dict(String, Either(String, typ)), typ, default=default, help=help) self._type = self._validate_type_param(typ) def to_dict(self, obj): val = getattr(obj, self._name, self.default) # Check for None value if val is None: return dict(value=None) # Check for spec type value try: self._type.validate(val) return dict(value=val) except ValueError: pass # Check for data source field name if isinstance(val, string_types): return dict(field=val) # Must be dict, return as-is return val def __str__(self): val = getattr(self, self._name, self.default) return "%s(%r)" % (self.__class__.__name__, val) class NumberSpec(DataSpec): def __init__(self, default, help=None): super(NumberSpec, self).__init__(Float, default=default, help=help) class StringSpec(DataSpec): def __init__(self, default, help=None): super(StringSpec, self).__init__(List(String), default=default, help=help) def __set__(self, obj, value): if isinstance(value, list): if len(value) != 1: raise TypeError("StringSpec convenience list values must have length 1") value = dict(value=value[0]) super(StringSpec, self).__set__(obj, value) class FontSizeSpec(DataSpec): def __init__(self, default, help=None): super(FontSizeSpec, self).__init__(List(String), default=default, help=help) def __set__(self, obj, value): if isinstance(value, string_types): warn('Setting a fixed font size value as a string %r is deprecated, ' 'set with value(%r) or [%r] instead' % (value, value, value), DeprecationWarning, stacklevel=2) if len(value) > 0 and value[0].isdigit(): value = dict(value=value) super(FontSizeSpec, self).__set__(obj, value) class UnitsSpec(NumberSpec): def __init__(self, default, units_type, units_default, help=None): super(UnitsSpec, self).__init__(default=default, help=help) self._units_type = self._validate_type_param(units_type) self._units_type.validate(units_default) self._units_type._default = units_default def to_dict(self, obj): d = super(UnitsSpec, self).to_dict(obj) d["units"] = getattr(obj, self.name+"_units") return d def __set__(self, obj, value): if isinstance(value, dict): units = value.pop("units", None) if units: setattr(obj, self.name+"_units", units) super(UnitsSpec, self).__set__(obj, value) def __str__(self): val = getattr(self, self._name, self.default) return "%s(%r, units_default=%r)" % (self.__class__.__name__, val, self._units_type._default) class AngleSpec(UnitsSpec): def __init__(self, default, units_default="rad", help=None): super(AngleSpec, self).__init__(default=default, units_type=Enum(enums.AngleUnits), 
units_default=units_default, help=help) class DistanceSpec(UnitsSpec): def __init__(self, default, units_default="data", help=None): super(DistanceSpec, self).__init__(default=default, units_type=Enum(enums.SpatialUnits), units_default=units_default, help=help) def __set__(self, obj, value): try: if value < 0: raise ValueError("Distances must be non-negative") except TypeError: pass super(DistanceSpec, self).__set__(obj, value) class ScreenDistanceSpec(NumberSpec): def to_dict(self, obj): d = super(ScreenDistanceSpec, self).to_dict(obj) d["units"] = "screen" return d def __set__(self, obj, value): try: if value < 0: raise ValueError("Distances must be non-negative") except TypeError: pass super(ScreenDistanceSpec, self).__set__(obj, value) class DataDistanceSpec(NumberSpec): def to_dict(self, obj): d = super(ScreenDistanceSpec, self).to_dict(obj) d["units"] = "data" return d def __set__(self, obj, value): try: if value < 0: raise ValueError("Distances must be non-negative") except TypeError: pass super(DataDistanceSpec, self).__set__(obj, value) class ColorSpec(DataSpec): def __init__(self, default, help=None): super(ColorSpec, self).__init__(Color, default=default, help=help) @classmethod def isconst(cls, arg): """ Returns True if the argument is a literal color. Check for a well-formed hexadecimal color value. """ return isinstance(arg, string_types) and \ ((len(arg) == 7 and arg[0] == "#") or arg in enums.NamedColor._values) @classmethod def is_color_tuple(cls, val): return isinstance(val, tuple) and len(val) in (3, 4) @classmethod def format_tuple(cls, colortuple): if len(colortuple) == 3: return "rgb%r" % (colortuple,) else: return "rgba%r" % (colortuple,) def to_dict(self, obj): val = getattr(obj, self._name, self.default) if val is None: return dict(value=None) # Check for hexadecimal or named color if self.isconst(val): return dict(value=val) # Check for RGB or RGBa tuple if isinstance(val, tuple): return dict(value=self.format_tuple(val)) # Check for data source field name if isinstance(val, string_types): return dict(field=val) # Must be dict, return as-is return val def validate(self, value): try: return super(ColorSpec, self).validate(value) except ValueError as e: # Check for tuple input if not yet a valid input type if self.is_color_tuple(value): return True else: raise e def transform(self, value): # Make sure that any tuple has either three integers, or three integers and one float if isinstance(value, tuple): value = tuple(int(v) if i < 3 else v for i, v in enumerate(value)) return value
bsd-3-clause
erscott/Wellderly
SWGR_v1.0/masterVar_chr_split.py
1
3344
''' Splits Complete Genomics masterVar files into chromosome specific masterVar files when given an input file path and an output directory path. e.g. >python masterVar_chr_split.py -i /path/to/masterVar.tsv.bz2 -o /path/to/output_dir/ Python package dependencies: pandas, numpy python 2.7 for argparse module ''' import pandas as pd import os, sys import argparse parser = argparse.ArgumentParser() parser.add_argument('-i', type=str,help='Specifies the input file, /path/to/CG_data/masterVar.tsv.bz2') parser.add_argument('-o', type=str,help='Specifies the output directory, e.g. /path/to/CG_data/chromosome/') def chr_split_mastervar(f_path, target_path): #Get header for masterVar header = os.popen('bzcat ' + f_path+ ' | head -100 | grep chromosome -n').readlines() #Creating Reader object for iterating through NA12878 CG masterVar file skip_rows = int(header[0].split(":")[0]) -1 mastervar_headings = os.popen('head -' + str(skip_rows) + f_path).readlines() #Creating pandas dataframe with chunksize 200,000 lines chunk = pd.read_table(f_path, chunksize=200000, sep="\t", skiprows=skip_rows,compression='bz2',dtype=object) chunk.columns = header[0].rstrip('\n').split(":")[1].split("\t") #relabeling columns prev_chr = 'chr1' #tracking chromosome position prev_target_write_file = None for mastervar in chunk: #iterate through mastervar file for current_chrom,chr_df in mastervar.groupby(['chromosome']): #split dataframe by chromosome for writing #check for increment to new chromosome if prev_chr != current_chrom: os.system('bzip2 ' + prev_target_write_file) #compress last chromosome file prev_chr = current_chrom #specifying output file path and chromosome-specific name file_name = f_path.split("/")[-1].rstrip(".tsv.bz2") #getting file prefix target_path = target_path.rstrip("/")+"/" #ensuring target path ends with fwd slash write_target_file_path = target_path +file_name + "_" + current_chrom +".tsv" #specify target directory and chrom file name #print write_target_file_path if len(os.popen('find '+ write_target_file_path + '').readlines()) == 0: #checking for output file os.system('bzcat '+ f_path + '| head -' + str(skip_rows) + " > " +write_target_file_path) #writing header if no output file found chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a') #writing chromosome specific variants to output file else: #Suppress header if target file found chr_df.to_csv(write_target_file_path, sep="\t", index=False, mode='a', header=False) #writing chromosome specifc variants to output file w/o header prev_target_write_file = write_target_file_path #increment to current write_target_file_path return 'complete' opts = parser.parse_known_args() f_path, target_path = opts[0].i, opts[0].o assert f_path.split(".")[-2:] == ['tsv','bz2'], "expecting masterVar input file suffix .tsv.bz2" test = chr_split_mastervar(f_path, target_path) if test == 'complete': print 'All chromosomes processed'
bsd-3-clause
xwolf12/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
PTDreamer/dRonin
python/ins/cins.py
11
3838
from sympy import symbols, lambdify, sqrt from sympy import MatrixSymbol, Matrix from numpy import cos, sin, power from sympy.matrices import * from quaternions import * import numpy import ins # this is the set of (currently) recommend INS settings. modified from # https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav default_mag_var = numpy.array([10.0, 10.0, 100.0]) default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4]) default_accel_var = numpy.array([0.01, 0.01, 0.01]) default_baro_var = 0.1 default_gps_var=numpy.array([1e-3,1e-2,10]) class CINS: GRAV = 9.805 def __init__(self): """ Creates the CINS class. Important variables are * X - the vector of state variables * Xd - the vector of state derivatives for state and inputs * Y - the vector of outputs for current state value """ self.state = [] def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None): """ configure the INS parameters """ if mag_var is not None: ins.configure(mag_var=mag_var) if gyro_var is not None: ins.configure(gyro_var=gyro_var) if accel_var is not None: ins.configure(accel_var=accel_var) if baro_var is not None: ins.configure(baro_var=baro_var) if gps_var is not None: ins.configure(gps_var=gps_var) def prepare(self): """ prepare the C INS wrapper """ self.state = ins.init() self.configure( mag_var=default_mag_var, gyro_var=default_gyro_var, accel_var=default_accel_var, baro_var=default_baro_var, gps_var=default_gps_var ) def predict(self, gyros, accels, dT = 1.0/666.0): """ Perform the prediction step """ self.state = ins.prediction(gyros, accels, dT) def correction(self, pos=None, vel=None, mag=None, baro=None): """ Perform the INS correction based on the provided corrections """ sensors = 0 Z = numpy.zeros((10,),numpy.float64) # the masks must match the values in insgps.h if pos is not None: sensors = sensors | 0x0003 Z[0] = pos[0] Z[1] = pos[1] if vel is not None: sensors = sensors | 0x0038 Z[3] = vel[0] Z[4] = vel[1] Z[5] = vel[2] if mag is not None: sensors = sensors | 0x01C0 Z[6] = mag[0] Z[7] = mag[1] Z[8] = mag[2] if baro is not None: sensors = sensors | 0x0200 Z[9] = baro self.state = ins.correction(Z, sensors) def test(): """ test the INS with simulated data """ from numpy import cos, sin import matplotlib.pyplot as plt fig, ax = plt.subplots(2,2) sim = PyINS() sim.prepare() dT = 1.0 / 666.0 STEPS = 100000 history = numpy.zeros((STEPS,16)) history_rpy = numpy.zeros((STEPS,3)) times = numpy.zeros((STEPS,1)) for k in range(STEPS): ROLL = 0.1 YAW = 0.2 sim.predict(U=[0,0,YAW, 0, PyINS.GRAV*sin(ROLL), -PyINS.GRAV*cos(ROLL) - 0.0], dT=dT) history[k,:] = sim.state history_rpy[k,:] = quat_rpy(sim.state[6:10]) times[k] = k * dT angle = 0*numpy.pi/3 + YAW * dT * k # radians height = 1.0 * k * dT if True and k % 60 == 59: sim.correction(pos=[[10],[5],[-height]]) if True and k % 60 == 59: sim.correction(vel=[[0],[0],[-1]]) if k % 20 == 8: sim.correction(baro=[height]) if True and k % 20 == 15: sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]]) if k % 1000 == 0: ax[0][0].cla() ax[0][0].plot(times[0:k:4],history[0:k:4,0:3]) ax[0][0].set_title('Position') ax[0][1].cla() ax[0][1].plot(times[0:k:4],history[0:k:4,3:6]) ax[0][1].set_title('Velocity') plt.sca(ax[0][1]) plt.ylim(-2,2) ax[1][0].cla() ax[1][0].plot(times[0:k:4],history_rpy[0:k:4,:]) ax[1][0].set_title('Attitude') ax[1][1].cla() ax[1][1].plot(times[0:k:4],history[0:k:4,10:]) ax[1][1].set_title('Biases') plt.draw() fig.show() plt.show() if __name__ =='__main__': test()
gpl-3.0
costypetrisor/scikit-learn
examples/tree/plot_tree_regression_multioutput.py
43
1791
""" =================================================================== Multi-output Decision Tree Regression =================================================================== An example to illustrate multi-output regression with decision tree. The :ref:`decision trees <tree>` is used to predict simultaneously the noisy x and y observations of a circle given a single underlying feature. As a result, it learns local linear regressions approximating the circle. We can see that if the maximum depth of the tree (controlled by the `max_depth` parameter) is set too high, the decision trees learn too fine details of the training data and learn from the noise, i.e. they overfit. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor # Create a random dataset rng = np.random.RandomState(1) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += (0.5 - rng.rand(20, 2)) # Fit regression model clf_1 = DecisionTreeRegressor(max_depth=2) clf_2 = DecisionTreeRegressor(max_depth=5) clf_3 = DecisionTreeRegressor(max_depth=8) clf_1.fit(X, y) clf_2.fit(X, y) clf_3.fit(X, y) # Predict X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis] y_1 = clf_1.predict(X_test) y_2 = clf_2.predict(X_test) y_3 = clf_3.predict(X_test) # Plot the results plt.figure() plt.scatter(y[:, 0], y[:, 1], c="k", label="data") plt.scatter(y_1[:, 0], y_1[:, 1], c="g", label="max_depth=2") plt.scatter(y_2[:, 0], y_2[:, 1], c="r", label="max_depth=5") plt.scatter(y_3[:, 0], y_3[:, 1], c="b", label="max_depth=8") plt.xlim([-6, 6]) plt.ylim([-6, 6]) plt.xlabel("data") plt.ylabel("target") plt.title("Multi-output Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
sadimanna/computer_vision
clustering/kmeansppclustering_with_gap_statistic.py
1
2599
#K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters import sys import numpy as np import scipy.io as sio #import matplotlib.pyplot as plt from sklearn.cluster import KMeans from sklearn.svm import SVC filename = sys.argv[1] datafile = sio.loadmat(filename) data = datafile['bow'] sizedata=[len(data), len(data[0])] disp = [] optimal_ks = [] #Determining the optimal number of k with gap statistic method def gap_statistic(data): sizedata = [len(data),len(data[0])] SD = [] gap = [] for knum in xrange(1,20): #I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly print knum #Clustering original Data kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1) kmeanspp.fit(data) dispersion = kmeanspp.inertia_ #Clustering Reference Data nrefs = 10 refDisp = np.zeros(nrefs) for nref in xrange(nrefs): refdata = np.random.random_sample(tuple(sizedata)) refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1) refkmeans.fit(refdata) refdisp = refkmeans.inertia_ refDisp[nref]=np.log(refdisp) mean_log_refdisp = np.mean(refDisp) gap.append(mean_log_refdisp-np.log(dispersion)) sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5 SD.append(sd) SD = [sd*((1+(1/nrefs))**0.5) for sd in SD] opt_k = None diff = [] for i in xrange(len(gap)-1): diff = (SD[i+1]-(gap[i+1]-gap[i])) if diff>0: opt_k = i+10 break if opt_k < 20: #print opt_k return opt_k else: return 20 #Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20. # Not required if range is larger. ntrials = 50 for ntrial in xrange(ntrials): print 'ntrial: ',ntrial optimal_ks.append(gap_statistic(data)) #For plotting the gap statistic measure #plt.plot(np.linspace(10,19,10,True),gap) #plt.show() unique_opt_k = list(set(optimal_ks)) k_count = {} count_opt_k = 0 second_opt_k = 0 opt_k = 0 for u_o_k in unique_opt_k: count = optimal_ks.count(u_o_k) k_count[u_o_k]=count if count>count_opt_k: count_opt_k = count opt_k = u_o_k elif count==count_opt_k: second_opt_k = u_o_k print opt_k print k_count #Clusterin with optimal number of k kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1) kmeanspp.fit(data) centers = kmeanspp.cluster_centers_ clusterlabels = kmeanspp.labels_ print clusterlabels mdict = {} mdict['clusterlabels'] = clusterlabels sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column') print 'dan dana dan done...'
gpl-3.0
sonalranjit/SECS
SECS_trace.py
2
1609
__author__ = 'sonal'
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
from math import *


def polar_plot(grid, title):
    #z = grid[:,8]
    u = grid[:,8]
    v = grid[:,9]

    plt.figure(figsize=(18,18))
    ax = plt.gca()
    #m = Basemap(projection='npaeqd',boundinglat=20,lon_0=-100.,resolution='l')
    m = Basemap(width=8000000, height=8000000, resolution='l', projection='lcc',\
        lat_0=60,lon_0=-100.)
    m.drawcoastlines()
    m.drawparallels(np.arange(-80.,81,20.),labels=[1,0,0,0],fontsize=10)
    m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10)

    x,y =m(grid[:,7],grid[:,6])
    sc = m.scatter(x,y,s=abs(u),c=u,marker=',',cmap=cm.jet,alpha=0.9,edgecolors='none')
    plt.title(title)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    cb1 = plt.colorbar(sc,cax=cax)
    cb1.set_label("mA/m",fontsize=18)
    plt.savefig('GOCE_asc_EICSu_krigged_201104.png',bbox_inches='tight',pad_inches=0.2)
    #plt.show()


def asc_desc(data):
    asc = []
    desc = []
    lat = data[:,6]
    for i in range(0,len(data)-1):
        if lat[i+1] >= lat[i]:
            asc.append(i)
        else:
            desc.append(i)
    return asc, desc


SECS_data = np.loadtxt('EICS_201103_krigged.txt')

asc_idx, desc_idx= asc_desc(SECS_data)
asc_track = SECS_data[asc_idx,:]
desc_track = SECS_data[desc_idx,:]

polar_plot(asc_track,'GOCE Ascending EICS u component Krigged April, 2011')
gpl-2.0
zfrenchee/pandas
pandas/core/api.py
1
3146
# pylint: disable=W0614,W0401,W0611
# flake8: noqa

import numpy as np

from pandas.core.algorithms import factorize, unique, value_counts
from pandas.core.dtypes.missing import isna, isnull, notna, notnull
from pandas.core.categorical import Categorical
from pandas.core.groupby import Grouper
from pandas.io.formats.format import set_eng_float_format
from pandas.core.index import (Index, CategoricalIndex, Int64Index,
                               UInt64Index, RangeIndex, Float64Index,
                               MultiIndex, IntervalIndex,
                               TimedeltaIndex, DatetimeIndex,
                               PeriodIndex, NaT)
from pandas.core.indexes.period import Period, period_range, pnow
from pandas.core.indexes.timedeltas import Timedelta, timedelta_range
from pandas.core.indexes.datetimes import Timestamp, date_range, bdate_range
from pandas.core.indexes.interval import Interval, interval_range

from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel, WidePanel
from pandas.core.panel4d import Panel4D

# TODO: Remove import when statsmodels updates #18264
from pandas.core.reshape.reshape import get_dummies

from pandas.core.indexing import IndexSlice
from pandas.core.tools.numeric import to_numeric
from pandas.tseries.offsets import DateOffset
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta

# see gh-14094.
from pandas.util._depr_module import _DeprecatedModule

_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay',
             'customBusinessMonthEnd', 'customBusinessMonthBegin',
             'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin',
             'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd',
             'byearEnd', 'week']
datetools = _DeprecatedModule(deprmod='pandas.core.datetools',
                              removals=_removals)

from pandas.core.config import (get_option, set_option, reset_option,
                                describe_option, option_context, options)


# deprecation, xref #13790
def match(*args, **kwargs):
    import warnings
    warnings.warn("pd.match() is deprecated and will be removed "
                  "in a future version",
                  FutureWarning, stacklevel=2)
    from pandas.core.algorithms import match
    return match(*args, **kwargs)


def groupby(*args, **kwargs):
    import warnings
    warnings.warn("pd.groupby() is deprecated and will be removed; "
                  "Please use the Series.groupby() or "
                  "DataFrame.groupby() methods", FutureWarning, stacklevel=2)
    return args[0].groupby(*args[1:], **kwargs)


# Deprecation: xref gh-16747
class TimeGrouper(object):

    def __new__(cls, *args, **kwargs):
        from pandas.core.resample import TimeGrouper
        import warnings
        warnings.warn("pd.TimeGrouper is deprecated and will be removed; "
                      "Please use pd.Grouper(freq=...)",
                      FutureWarning, stacklevel=2)
        return TimeGrouper(*args, **kwargs)
bsd-3-clause
bmazin/SDR
Projects/ChannelizerSim/legacy/bin_width_1st_stage.py
1
1524
import matplotlib.pyplot as plt
import scipy.signal
import numpy as np
import math
import random
from matplotlib.backends.backend_pdf import PdfPages

samples = 51200
L = samples/512
fs = 512e6
dt = 1/fs
time = [i*dt for i in range(samples)]

def pfb_fir(x):
    N = len(x)
    T = 4
    L = 512
    bin_width_scale = 2.5
    dx = T*math.pi/L/T
    X = np.array([n*dx-T*math.pi/2 for n in range(T*L)])
    coeff = np.sinc(bin_width_scale*X/math.pi)*np.hanning(T*L)

    y = np.array([0+0j]*(N-T*L))
    for n in range((T-1)*L, N):
        m = n%L
        coeff_sub = coeff[L*T-m::-L]
        y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum()

    return y

R = 100/5
#freqs = [i*1e5 + 6.0e6 for i in range(R)]
freqs = [i*5e4 + 6.0e6 for i in range(R*8)]

bin = []
bin_pfb = []

for f in freqs:
    print f
    signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time])

    y = pfb_fir(signal)
    bin_pfb.append(np.fft.fft(y[0:512])[10])

bin = np.array(bin)
bin_pfb = np.array(bin_pfb)

freqs = np.array(freqs)/1e6

b = scipy.signal.firwin(20, cutoff=0.125, window="hanning")
w,h = scipy.signal.freqz(b,1, 4*R, whole=1)
h = np.array(h[2*R:4*R].tolist()+h[0:2*R].tolist())
#h = np.array(h[20:40].tolist()+h[0:20].tolist())

fig = plt.figure()
ax0 = fig.add_subplot(111)
#ax0.plot(freqs, abs(fir9), '.', freqs, abs(fir10), '.', freqs, abs(fir11), '.')
ax0.plot(freqs, 10*np.log10(abs(bin_pfb)/512), 'k-')
ax0.set_xlabel('Frequency (MHz)')
ax0.set_ylabel('Gain (dB)')
ax0.set_ylim((-50,0))
plt.show()

#ax0.axvline(x = 10, linewidth=1, color='k')
gpl-2.0
RayMick/scikit-learn
examples/neighbors/plot_species_kde.py
282
4059
""" ================================================ Kernel Density Estimate of Species Distributions ================================================ This shows an example of a neighbors-based query (in particular a kernel density estimate) on geospatial data, using a Ball Tree built upon the Haversine distance metric -- i.e. distances over points in latitude/longitude. The dataset is provided by Phillips et. al. (2006). If available, the example uses `basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_ to plot the coast lines and national boundaries of South America. This example does not perform any learning over the data (see :ref:`example_applications_plot_species_distribution_modeling.py` for an example of classification based on the attributes in this dataset). It simply shows the kernel density estimate of observed data points in geospatial coordinates. The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. """ # Author: Jake Vanderplas <jakevdp@cs.washington.edu> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import fetch_species_distributions from sklearn.datasets.species_distributions import construct_grids from sklearn.neighbors import KernelDensity # if basemap is available, we'll use it. # otherwise, we'll improvise later... try: from mpl_toolkits.basemap import Basemap basemap = True except ImportError: basemap = False # Get matrices/arrays of species IDs and locations data = fetch_species_distributions() species_names = ['Bradypus Variegatus', 'Microryzomys Minutus'] Xtrain = np.vstack([data['train']['dd lat'], data['train']['dd long']]).T ytrain = np.array([d.decode('ascii').startswith('micro') for d in data['train']['species']], dtype='int') Xtrain *= np.pi / 180. # Convert lat/long to radians # Set up the data grid for the contour plot xgrid, ygrid = construct_grids(data) X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1]) land_reference = data.coverages[6][::5, ::5] land_mask = (land_reference > -9999).ravel() xy = np.vstack([Y.ravel(), X.ravel()]).T xy = xy[land_mask] xy *= np.pi / 180. 
# Plot map of South America with distributions of each species fig = plt.figure() fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05) for i in range(2): plt.subplot(1, 2, i + 1) # construct a kernel density estimate of the distribution print(" - computing KDE in spherical coordinates") kde = KernelDensity(bandwidth=0.04, metric='haversine', kernel='gaussian', algorithm='ball_tree') kde.fit(Xtrain[ytrain == i]) # evaluate only on the land: -9999 indicates ocean Z = -9999 + np.zeros(land_mask.shape[0]) Z[land_mask] = np.exp(kde.score_samples(xy)) Z = Z.reshape(X.shape) # plot contours of the density levels = np.linspace(0, Z.max(), 25) plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds) if basemap: print(" - plot coastlines using basemap") m = Basemap(projection='cyl', llcrnrlat=Y.min(), urcrnrlat=Y.max(), llcrnrlon=X.min(), urcrnrlon=X.max(), resolution='c') m.drawcoastlines() m.drawcountries() else: print(" - plot coastlines from coverage") plt.contour(X, Y, land_reference, levels=[-9999], colors="k", linestyles="solid") plt.xticks([]) plt.yticks([]) plt.title(species_names[i]) plt.show()
bsd-3-clause
burjorjee/evolve-parities
evolveparities.py
1
5098
from contextlib import closing from matplotlib.pyplot import plot, figure, hold, axis, ylabel, xlabel, savefig, title from numpy import sort, logical_xor, transpose, logical_not from numpy.numarray.functions import cumsum, zeros from numpy.random import rand, shuffle from numpy import mod, floor import time import cloud from durus.file_storage import FileStorage from durus.connection import Connection def bitFreqVisualizer(effectiveAttrIndices, bitFreqs, gen): f = figure(1) n = len(bitFreqs) hold(False) plot(range(n), bitFreqs,'b.', markersize=10) hold(True) plot(effectiveAttrIndices, bitFreqs[effectiveAttrIndices],'r.', markersize=10) axis([0, n-1, 0, 1]) title("Generation = %s" % (gen,)) ylabel('Frequency of the Bit 1') xlabel('Locus') f.canvas.draw() f.show() def showExperimentTimeStamps(): with closing(FileStorage("results.durus")) as durus: conn = Connection(durus) return conn.get_root().keys() def neap_uga(m, n, gens, probMutation, effectiveAttrIndices, probMisclassification, bitFreqVisualizer=None): """ neap = "noisy effective attribute parity" """ pop = rand(m,n)<0.5 bitFreqHist= zeros((n,gens+1)) for t in range(gens+1): print "Generation %s" % t bitFreqs = pop.astype('float').sum(axis=0)/m bitFreqHist[:,t] = transpose(bitFreqs) if bitFreqVisualizer: bitFreqVisualizer(bitFreqs,t) fitnessVals = mod(pop[:, effectiveAttrIndices].astype('byte').sum(axis=1) + (rand(m) < probMisclassification).astype('byte'),2) totalFitness = sum (fitnessVals) cumNormFitnessVals = cumsum(fitnessVals).astype('float')/totalFitness parentIndices = zeros(2*m, dtype='int16') markers = sort(rand(2*m)) ctr = 0 for idx in xrange(2*m): while markers[idx]>cumNormFitnessVals[ctr]: ctr += 1 parentIndices[idx] = ctr shuffle(parentIndices) crossoverMasks = rand(m, n) < 0.5 newPop = zeros((m, n), dtype='bool') newPop[crossoverMasks] = pop[parentIndices[:m], :][crossoverMasks] newPop[logical_not(crossoverMasks)] = pop[parentIndices[m:], :][logical_not(crossoverMasks)] mutationMasks = rand(m, n)<probMutation pop = logical_xor(newPop,mutationMasks) return bitFreqHist[0, :], bitFreqHist[-1, :] def f(gens): k = 7 n= k + 1 effectiveAttrIndices = range(k) probMutation = 0.004 probMisclassification = 0.20 popSize = 1500 jid = cloud.call(neap_uga, **dict(m=popSize, n=n, gens=gens, probMutation=probMutation, effectiveAttrIndices=effectiveAttrIndices, probMisclassification=probMisclassification)) print "Kicked off trial %s" % jid return jid def cloud_result(jid): result = cloud.result(jid) print "Retrieved results for trial %s" % jid return result def run_trials(): numTrials = 3000 gens = 1000 from multiprocessing.pool import ThreadPool as Pool pool = Pool(50) jids = pool.map(f,[gens]*numTrials) print "Done spawning trials. Retrieving results..." results = pool.map(cloud_result, jids) firstLocusFreqsHists = zeros((numTrials,gens+1), dtype='float') lastLocusFreqsHists = zeros((numTrials,gens+1), dtype='float') print "Done retrieving results. Press Enter to serialize..." 
raw_input() for i, result in enumerate(results): firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result with closing(FileStorage("results.durus")) as durus: conn = Connection(durus) conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists) conn.commit() pool.close() pool.join() def render_results(timestamp=None): with closing(FileStorage("results.durus")) as durus: conn = Connection(durus) db = conn.get_root() if not timestamp: timestamp = sorted(db.keys())[-1] firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp] print "Done deserializing results. Plotting..." x = [(2, 'First', firstLocusFreqsHists, "effective"), (3, 'Last', lastLocusFreqsHists, "non-effective")] for i, pos, freqsHists, filename in x : freqsHists = freqsHists[:,:801] f = figure(i) hold(False) plot(transpose(freqsHists), color='grey') hold(True) maxGens = freqsHists.shape[1]-1 plot([0, maxGens], [.05,.05], 'k--') plot([0, maxGens], [.95,.95], 'k--') axis([0, maxGens, 0, 1]) xlabel('Generation') ylabel('1-Frequency of the '+pos+' Locus') f.canvas.draw() f.show() savefig(filename+'.png', format='png', dpi=200) if __name__ == "__main__": cloud.start_simulator() run_trials() render_results() print "Done plotting results. Press Enter to end..." raw_input()
gpl-3.0
nomadcube/scikit-learn
examples/mixture/plot_gmm_pdf.py
284
1528
""" ============================================= Density Estimation for a mixture of Gaussians ============================================= Plot the density estimation of a mixture of two Gaussians. Data is generated from two Gaussians with different centers and covariance matrices. """ import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LogNorm from sklearn import mixture n_samples = 300 # generate random sample, two components np.random.seed(0) # generate spherical data centered on (20, 20) shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20]) # generate zero centered stretched Gaussian data C = np.array([[0., -0.7], [3.5, .7]]) stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C) # concatenate the two datasets into the final training set X_train = np.vstack([shifted_gaussian, stretched_gaussian]) # fit a Gaussian Mixture Model with two components clf = mixture.GMM(n_components=2, covariance_type='full') clf.fit(X_train) # display predicted scores by the model as a contour plot x = np.linspace(-20.0, 30.0) y = np.linspace(-20.0, 40.0) X, Y = np.meshgrid(x, y) XX = np.array([X.ravel(), Y.ravel()]).T Z = -clf.score_samples(XX)[0] Z = Z.reshape(X.shape) CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0), levels=np.logspace(0, 3, 10)) CB = plt.colorbar(CS, shrink=0.8, extend='both') plt.scatter(X_train[:, 0], X_train[:, 1], .8) plt.title('Negative log-likelihood predicted by a GMM') plt.axis('tight') plt.show()
bsd-3-clause
rbiswas4/SNsims
snsims_previous/snsims/tmp/models.py
1
2804
#!/usr/bin/env python import sncosmo.models import numpy class SEDFileSource(sncosmo.models.TimeSeriesSource): """A TimeSeriesSource stored in a 3-column ASCII file format, for PHASE, LAMBDA, and F_LAMBDA. The hash symbol # is a comment line. The spectral flux density of this model is given by .. math:: F(t, \lambda) = A \\times M(t, \lambda) where _M_ is the flux defined on a grid in phase and wavelength and _A_ (amplitude) is the single free parameter of the model. It should be noted that while t and \lambda are in the rest frame of the object, the flux density is defined at redshift zero. This means that for objects with the same intrinsic luminosity, the amplitude will be smaller for objects at larger luminosity distances. Parameters ---------- filename : str Name of the filename that contains the Time Series zero_before : bool, optional If True, flux at phases before minimum phase will be zeroed. The default is False, in which case the flux at such phases will be equal to the flux at the minimum phase (``flux[0, :]`` in the input array). version : str, optional Version of the model. Default is `None`. Returns ------- `~sncosmo.TimeSeriesSource` instance representing the TimeSeriesSource in file """ _param_names = ['amplitude'] param_names_latex = ['A'] def __init__(self, filename, zero_before=False, version=None): phase, wave, flux = numpy.loadtxt(filename, unpack=True) # Convert 3 column format to that expected by TimeSeriesSource phase_u = numpy.unique(phase) wave_u = numpy.unique(wave) lenp = len(phase_u) lenw = len(wave_u) if lenp * lenw != len(flux): raise TypeError('File is not a TimeSeriesSource') i = numpy.zeros(len(flux), dtype='int') j = numpy.zeros(len(flux), dtype='int') for index, p in enumerate(phase_u): i[phase == p] = index for index, w in enumerate(wave_u): j[wave == w] = index flux = flux[i * lenw + j] flux = numpy.reshape(flux, (lenp, lenw)) super(SEDFileSource, self).__init__(phase_u, wave_u, flux, zero_before=False, name=filename, version=None) if __name__ == '__main__': # filename = '/Users/akim/project/SNDATA_ROOT/snsed/NON1A/SDSS-019323.SED' # data = SEDFileSource(filename) sn = sncosmo.Model(source='snana-2007nc') print sn.param_names # wefwe import matplotlib.pyplot as plt plt.plot(data._wave, data.flux(0, data._wave)) plt.plot(sn.source._wave, sn.flux(0, sn.source._wave) * 0.95) plt.show()
mit
imaculate/scikit-learn
examples/ensemble/plot_adaboost_regression.py
311
1529
""" ====================================== Decision Tree Regression with AdaBoost ====================================== A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D sinusoidal dataset with a small amount of Gaussian noise. 299 boosts (300 decision trees) is compared with a single decision tree regressor. As the number of boosts is increased the regressor can fit more detail. .. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause # importing necessary libraries import numpy as np import matplotlib.pyplot as plt from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import AdaBoostRegressor # Create the dataset rng = np.random.RandomState(1) X = np.linspace(0, 6, 100)[:, np.newaxis] y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0]) # Fit regression model regr_1 = DecisionTreeRegressor(max_depth=4) regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4), n_estimators=300, random_state=rng) regr_1.fit(X, y) regr_2.fit(X, y) # Predict y_1 = regr_1.predict(X) y_2 = regr_2.predict(X) # Plot the results plt.figure() plt.scatter(X, y, c="k", label="training samples") plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2) plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2) plt.xlabel("data") plt.ylabel("target") plt.title("Boosted Decision Tree Regression") plt.legend() plt.show()
bsd-3-clause
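The example above only contrasts a single tree with the full 300-stage ensemble. A hedged sketch using AdaBoostRegressor.staged_predict (a standard scikit-learn method not shown in the example) tracks how the training error evolves across boosting iterations, reusing the same synthetic dataset recipe.

# Sketch: mean squared error after each boosting stage on the same data.
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])

regr = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                         n_estimators=300, random_state=rng)
regr.fit(X, y)

# staged_predict yields predictions after 1, 2, ..., 300 estimators
errors = [np.mean((y - y_pred) ** 2) for y_pred in regr.staged_predict(X)]
print("MSE after 1 estimator: %.4f, after 300: %.4f" % (errors[0], errors[-1]))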
alexeyum/scikit-learn
sklearn/datasets/mlcomp.py
289
3855
# Copyright (c) 2010 Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause """Glue code to load http://mlcomp.org data as a scikit.learn dataset""" import os import numbers from sklearn.datasets.base import load_files def _load_document_classification(dataset_path, metadata, set_=None, **kwargs): if set_ is not None: dataset_path = os.path.join(dataset_path, set_) return load_files(dataset_path, metadata.get('description'), **kwargs) LOADERS = { 'DocumentClassification': _load_document_classification, # TODO: implement the remaining domain formats } def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs): """Load a datasets as downloaded from http://mlcomp.org Parameters ---------- name_or_id : the integer id or the string name metadata of the MLComp dataset to load set_ : select the portion to load: 'train', 'test' or 'raw' mlcomp_root : the filesystem path to the root folder where MLComp datasets are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME environment variable is looked up instead. **kwargs : domain specific kwargs to be passed to the dataset loader. Read more in the :ref:`User Guide <datasets>`. Returns ------- data : Bunch Dictionary-like object, the interesting attributes are: 'filenames', the files holding the raw to learn, 'target', the classification labels (integer index), 'target_names', the meaning of the labels, and 'DESCR', the full description of the dataset. Note on the lookup process: depending on the type of name_or_id, will choose between integer id lookup or metadata name lookup by looking at the unzipped archives and metadata file. TODO: implement zip dataset loading too """ if mlcomp_root is None: try: mlcomp_root = os.environ['MLCOMP_DATASETS_HOME'] except KeyError: raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined") mlcomp_root = os.path.expanduser(mlcomp_root) mlcomp_root = os.path.abspath(mlcomp_root) mlcomp_root = os.path.normpath(mlcomp_root) if not os.path.exists(mlcomp_root): raise ValueError("Could not find folder: " + mlcomp_root) # dataset lookup if isinstance(name_or_id, numbers.Integral): # id lookup dataset_path = os.path.join(mlcomp_root, str(name_or_id)) else: # assume name based lookup dataset_path = None expected_name_line = "name: " + name_or_id for dataset in os.listdir(mlcomp_root): metadata_file = os.path.join(mlcomp_root, dataset, 'metadata') if not os.path.exists(metadata_file): continue with open(metadata_file) as f: for line in f: if line.strip() == expected_name_line: dataset_path = os.path.join(mlcomp_root, dataset) break if dataset_path is None: raise ValueError("Could not find dataset with metadata line: " + expected_name_line) # loading the dataset metadata metadata = dict() metadata_file = os.path.join(dataset_path, 'metadata') if not os.path.exists(metadata_file): raise ValueError(dataset_path + ' is not a valid MLComp dataset') with open(metadata_file) as f: for line in f: if ":" in line: key, value = line.split(":", 1) metadata[key.strip()] = value.strip() format = metadata.get('format', 'unknow') loader = LOADERS.get(format) if loader is None: raise ValueError("No loader implemented for format: " + format) return loader(dataset_path, metadata, set_=set_, **kwargs)
bsd-3-clause
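A hedged sketch of the name-based lookup path described in load_mlcomp's docstring. The dataset name and MLCOMP_DATASETS_HOME location are placeholders for whatever MLComp archives have been unzipped locally; this only works with the legacy scikit-learn versions that still ship this module.

# Sketch: name-based lookup of an unzipped MLComp document-classification set.
import os
from sklearn.datasets.mlcomp import load_mlcomp

os.environ.setdefault("MLCOMP_DATASETS_HOME", "/path/to/mlcomp/datasets")  # placeholder

news = load_mlcomp("20news-18828", set_="train")   # placeholder dataset name
print(news.target_names[:3])
print(len(news.filenames), "documents")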
wilselby/diy_driverless_car_ROS
rover_cv/camera_cal/src/camera_cal/camera_cal.py
1
6503
#!/usr/bin/env python # -*- coding: utf-8 -*- #https://github.com/paramaggarwal/CarND-Advanced-Lane-Lines/blob/master/Notebook.ipynb from __future__ import print_function from __future__ import division import sys import traceback import rospy import numpy as np import cv2 import pickle import glob import time import matplotlib.pyplot as plt import matplotlib.image as mpimg from std_msgs.msg import String from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError class camera_calibarion(object): def __init__(self): """ROS Subscriptions """ self.image_pub = rospy.Publisher("/camera_calibation/image_corrected",Image, queue_size=10) self.image_sub = rospy.Subscriber("/cam/camera_/image_raw",Image,self.cvt_image) """ Variables """ self.bridge = CvBridge() self.latestImage = None self.outputImage = None self.process = False self.calibrated = False self.correctedImage = None self.mtx = None self.dist = None def cvt_image(self,data): try: self.latestImage = self.bridge.imgmsg_to_cv2(data, "bgr8") except CvBridgeError as e: print(e) if self.process != True: self.process = True def camera_cal(self, image): # termination criteria criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) nx = 8 ny = 6 dst = np.copy(image) # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0) objp = np.zeros((ny * nx, 3), np.float32) objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2) # Arrays to store object points and image points from all the images. objpoints = [] # 3d points in real world space imgpoints = [] # 2d points in image plane. # Search for chessboard corners grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) #ret_thresh, mask = cv2.threshold(grey, 30, 255, cv2.THRESH_BINARY) ret, corners = cv2.findChessboardCorners(image, (nx, ny), None) #flags=(cv2.cv.CV_CALIB_CB_ADAPTIVE_THRESH + cv2.cv.CV_CALIB_CB_FILTER_QUADS)) # If found, add object points, image points if ret == True: objpoints.append(objp) cv2.cornerSubPix(grey,corners, (11,11), (-1,-1), criteria) imgpoints.append(corners) self.calibrated = True print ("FOUND!") #Draw and display the corners cv2.drawChessboardCorners(image, (nx, ny), corners, ret) # Do camera calibration given object points and image points ret, self.mtx, self.dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None) # Save the camera calibration result for later use (we won't worry about rvecs / tvecs) dist_pickle = {} dist_pickle["mtx"] = self.mtx dist_pickle["dist"] = self.dist dist_pickle['objpoints'] = objpoints dist_pickle['imgpoints'] = imgpoints pickle.dump( dist_pickle, open( "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/camera_cal_pickle.p", "wb" ) ) #else: #print("Searching...") return image def drawQuad(self, image, points, color=[255, 0, 0], thickness=4): p1, p2, p3, p4 = points cv2.line(image, tuple(p1), tuple(p2), color, thickness) cv2.line(image, tuple(p2), tuple(p3), color, thickness) cv2.line(image, tuple(p3), tuple(p4), color, thickness) cv2.line(image, tuple(p4), tuple(p1), color, thickness) def perspective_transform(self, image, debug=True, size_top=70, size_bottom=370): height, width = image.shape[0:2] output_size = height/2 #src = np.float32([[(width/2) - size_top, height*0.65], [(width/2) + size_top, height*0.65], [(width/2) + size_bottom, height-50], [(width/2) - size_bottom, height-50]]) src = np.float32([[512, 450], [675, 454], [707, 560], [347, 568]]) dst = np.float32([[347, height], [707, height], [707, 0], [347, 0]]) #dst = 
np.float32([[(width/2) - output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) - output_size], [(width/2) + output_size, (height/2) + output_size], [(width/2) - output_size, (height/2) + output_size]]) M = cv2.getPerspectiveTransform(src, dst) print(M) warped = cv2.warpPerspective(image, M, (width, height), flags=cv2.INTER_LINEAR) if debug: self.drawQuad(image, src, [255, 0, 0]) self.drawQuad(image, dst, [255, 255, 0]) plt.imshow(image) plt.show() return warped def undistort_image(self, image): return cv2.undistort(image, self.mtx, self.dist, None, self.mtx) def run(self): while True: # Only run loop if we have an image if self.process: filename = "/home/wil/ros/catkin_ws/src/av_sim/computer_vision/camera_calibration/data/check_test.png" image = cv2.imread(filename, flags=cv2.IMREAD_COLOR) if self.calibrated is not True: #print("Calibrating...") cornersImage = self.camera_cal(image) cvImage = cornersImage else: correctedImage = self.undistort_image(self.latestImage) # Distortion Correction Function transformImage = self.perspective_transform(self.latestImage) cvImage = transformImage # Publish Undistorted Image try: imgmsg = self.bridge.cv2_to_imgmsg(cvImage, "bgr8") #"mono8" "bgr8" self.image_pub.publish(imgmsg) except CvBridgeError as e: print(e) def main(args): rospy.init_node('camera_calibarion', anonymous=True) cc = camera_calibarion() cc.run() try: rospy.spin() except KeyboardInterrupt: print("Shutting down") cv2.destroyAllWindows() if __name__ == '__main__': main(sys.argv)
bsd-2-clause
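The ROS node above mixes calibration, undistortion, and a perspective transform; the following is a hedged, ROS-free sketch of just the OpenCV calibrate-then-undistort step it performs. The image path is a placeholder and the 8x6 chessboard size mirrors the node's assumption.

# Sketch: offline chessboard calibration followed by undistortion.
import pickle
import cv2
import numpy as np

nx, ny = 8, 6
objp = np.zeros((ny * nx, 3), np.float32)
objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

img = cv2.imread("chessboard.png")                    # placeholder path
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
found, corners = cv2.findChessboardCorners(gray, (nx, ny), None)

if found:
    _, mtx, dist, _, _ = cv2.calibrateCamera([objp], [corners],
                                             gray.shape[::-1], None, None)
    undistorted = cv2.undistort(img, mtx, dist, None, mtx)
    pickle.dump({"mtx": mtx, "dist": dist}, open("camera_cal_pickle.p", "wb"))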
SamStudio8/scikit-bio
skbio/io/format/ordination.py
8
14555
r""" Ordination results format (:mod:`skbio.io.format.ordination`) ============================================================= .. currentmodule:: skbio.io.format.ordination The ordination results file format (``ordination``) stores the results of an ordination method in a human-readable, text-based format. The format supports storing the results of various ordination methods available in scikit-bio, including (but not necessarily limited to) PCoA, CA, RDA, and CCA. Format Support -------------- **Has Sniffer: Yes** +------+------+---------------------------------------------------------------+ |Reader|Writer| Object Class | +======+======+===============================================================+ |Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` | +------+------+---------------------------------------------------------------+ Format Specification -------------------- The format is text-based, consisting of six attributes that describe the ordination results: - ``Eigvals``: 1-D - ``Proportion explained``: 1-D - ``Species``: 2-D - ``Site``: 2-D - ``Biplot``: 2-D - ``Site constraints``: 2-D The attributes in the file *must* be in this order. Each attribute is defined in its own section of the file, where sections are separated by a blank (or whitespace-only) line. Each attribute begins with a header line, which contains the attribute's name (as listed above), followed by a tab character, followed by one or more tab-separated dimensions (integers) that describe the shape of the attribute's data. The attribute's data follows its header line, and is stored in tab-separated format. ``Species``, ``Site``, and ``Site constraints`` store species and site IDs, respectively, as the first column, followed by the 2-D data array. An example of this file format might look like:: Eigvals<tab>4 0.36<tab>0.18<tab>0.07<tab>0.08 Proportion explained<tab>4 0.46<tab>0.23<tab>0.10<tab>0.10 Species<tab>9<tab>4 Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00 Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14 Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10 Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22 Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22 Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38 Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43 Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05 Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69 Site<tab>10<tab>4 Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24 Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69 Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11 Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66 Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61 Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28 Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42 Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00 Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17 Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28 Biplot<tab>3<tab>3 -0.16<tab>0.63<tab>0.76 -0.99<tab>0.06<tab>-0.04 0.18<tab>-0.97<tab>0.03 Site constraints<tab>10<tab>4 Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24 Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69 Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11 Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66 Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61 Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28 Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42 Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00 Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17 Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28 If a given result attribute is not present (e.g. ``Biplot``), it should still be defined and declare its dimensions as 0. 
For example:: Biplot<tab>0<tab>0 All attributes are optional except for ``Eigvals``. Examples -------- Assume we have the following tab-delimited text file storing the ordination results in ``ordination`` format:: Eigvals<tab>4 0.36<tab>0.18<tab>0.07<tab>0.08 Proportion explained<tab>4 0.46<tab>0.23<tab>0.10<tab>0.10 Species<tab>9<tab>4 Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00 Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14 Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10 Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22 Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22 Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38 Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43 Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05 Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69 Site<tab>10<tab>4 Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24 Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69 Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11 Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66 Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61 Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28 Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42 Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00 Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17 Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28 Biplot<tab>0<tab>0 Site constraints<tab>0<tab>0 Load the ordination results from the file: >>> from io import StringIO >>> from skbio import OrdinationResults >>> or_f = StringIO( ... "Eigvals\t4\n" ... "0.36\t0.18\t0.07\t0.08\n" ... "\n" ... "Proportion explained\t4\n" ... "0.46\t0.23\t0.10\t0.10\n" ... "\n" ... "Species\t9\t4\n" ... "Species0\t0.11\t0.28\t-0.20\t-0.00\n" ... "Species1\t0.14\t0.30\t0.39\t-0.14\n" ... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n" ... "Species3\t-1.03\t0.10\t0.22\t0.22\n" ... "Species4\t1.05\t0.53\t-0.43\t0.22\n" ... "Species5\t0.99\t0.57\t0.67\t-0.38\n" ... "Species6\t0.25\t-0.17\t-0.20\t0.43\n" ... "Species7\t0.14\t-0.85\t-0.01\t0.05\n" ... "Species8\t0.41\t-0.70\t0.21\t-0.69\n" ... "\n" ... "Site\t10\t4\n" ... "Site0\t0.71\t-3.08\t0.21\t-1.24\n" ... "Site1\t0.58\t-3.00\t-0.94\t2.69\n" ... "Site2\t0.76\t-3.15\t2.13\t-3.11\n" ... "Site3\t1.11\t1.07\t-1.87\t0.66\n" ... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n" ... "Site5\t1.04\t0.45\t-0.63\t0.28\n" ... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n" ... "Site7\t0.94\t-0.10\t0.52\t-0.00\n" ... "Site8\t-1.14\t0.49\t0.47\t1.17\n" ... "Site9\t1.03\t1.03\t2.74\t-1.28\n" ... "\n" ... "Biplot\t0\t0\n" ... "\n" ... "Site constraints\t0\t0\n") >>> ord_res = OrdinationResults.read(or_f) """ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. 
# ---------------------------------------------------------------------------- from __future__ import (absolute_import, division, print_function, unicode_literals) from future.builtins import zip import numpy as np import pandas as pd from skbio._base import OrdinationResults from skbio.io import create_format, OrdinationFormatError ordination = create_format('ordination') @ordination.sniffer() def _ordination_sniffer(fh): # Smells an ordination file if *all* of the following lines are present # *from the beginning* of the file: # - eigvals header (minimally parsed) # - another line (contents ignored) # - a whitespace-only line # - proportion explained header (minimally parsed) try: _parse_header(fh, 'Eigvals', 1) next_line = next(fh, None) if next_line is not None: _check_empty_line(fh) _parse_header(fh, 'Proportion explained', 1) return True, {} except OrdinationFormatError: pass return False, {} @ordination.reader(OrdinationResults) def _ordination_to_ordination_results(fh): eigvals = _parse_vector_section(fh, 'Eigvals') if eigvals is None: raise OrdinationFormatError("At least one eigval must be present.") _check_empty_line(fh) prop_expl = _parse_vector_section(fh, 'Proportion explained') _check_length_against_eigvals(prop_expl, eigvals, 'proportion explained values') _check_empty_line(fh) species = _parse_array_section(fh, 'Species') _check_length_against_eigvals(species, eigvals, 'coordinates per species') _check_empty_line(fh) site = _parse_array_section(fh, 'Site') _check_length_against_eigvals(site, eigvals, 'coordinates per site') _check_empty_line(fh) # biplot does not have ids to parse (the other arrays do) biplot = _parse_array_section(fh, 'Biplot', has_ids=False) _check_empty_line(fh) cons = _parse_array_section(fh, 'Site constraints') if cons is not None and site is not None: if not np.array_equal(cons.index, site.index): raise OrdinationFormatError( "Site constraints ids and site ids must be equal: %s != %s" % (cons.index, site.index)) return OrdinationResults( short_method_name='', long_method_name='', eigvals=eigvals, features=species, samples=site, biplot_scores=biplot, sample_constraints=cons, proportion_explained=prop_expl) def _parse_header(fh, header_id, num_dimensions): line = next(fh, None) if line is None: raise OrdinationFormatError( "Reached end of file while looking for %s header." % header_id) header = line.strip().split('\t') # +1 for the header ID if len(header) != num_dimensions + 1 or header[0] != header_id: raise OrdinationFormatError("%s header not found." 
% header_id) return header def _check_empty_line(fh): """Check that the next line in `fh` is empty or whitespace-only.""" line = next(fh, None) if line is None: raise OrdinationFormatError( "Reached end of file while looking for blank line separating " "sections.") if line.strip(): raise OrdinationFormatError("Expected an empty line.") def _check_length_against_eigvals(data, eigvals, label): if data is not None: num_vals = data.shape[-1] num_eigvals = eigvals.shape[-1] if num_vals != num_eigvals: raise OrdinationFormatError( "There should be as many %s as eigvals: %d != %d" % (label, num_vals, num_eigvals)) def _parse_vector_section(fh, header_id): header = _parse_header(fh, header_id, 1) # Parse how many values we are waiting for num_vals = int(header[1]) if num_vals == 0: # The ordination method didn't generate the vector, so set it to None vals = None else: # Parse the line with the vector values line = next(fh, None) if line is None: raise OrdinationFormatError( "Reached end of file while looking for line containing values " "for %s section." % header_id) vals = pd.Series(np.asarray(line.strip().split('\t'), dtype=np.float64)) if len(vals) != num_vals: raise OrdinationFormatError( "Expected %d values in %s section, but found %d." % (num_vals, header_id, len(vals))) return vals def _parse_array_section(fh, header_id, has_ids=True): """Parse an array section of `fh` identified by `header_id`.""" # Parse the array header header = _parse_header(fh, header_id, 2) # Parse the dimensions of the array rows = int(header[1]) cols = int(header[2]) ids = None if rows == 0 and cols == 0: # The ordination method didn't generate the array data for 'header', so # set it to None data = None elif rows == 0 or cols == 0: # Both dimensions should be 0 or none of them are zero raise OrdinationFormatError("One dimension of %s is 0: %d x %d" % (header_id, rows, cols)) else: # Parse the data data = np.empty((rows, cols), dtype=np.float64) if has_ids: ids = [] for i in range(rows): # Parse the next row of data line = next(fh, None) if line is None: raise OrdinationFormatError( "Reached end of file while looking for row %d in %s " "section." % (i + 1, header_id)) vals = line.strip().split('\t') if has_ids: ids.append(vals[0]) vals = vals[1:] if len(vals) != cols: raise OrdinationFormatError( "Expected %d values, but found %d in row %d." 
% (cols, len(vals), i + 1)) data[i, :] = np.asarray(vals, dtype=np.float64) data = pd.DataFrame(data, index=ids) return data @ordination.writer(OrdinationResults) def _ordination_results_to_ordination(obj, fh): _write_vector_section(fh, 'Eigvals', obj.eigvals) _write_vector_section(fh, 'Proportion explained', obj.proportion_explained) _write_array_section(fh, 'Species', obj.features) _write_array_section(fh, 'Site', obj.samples) _write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False) _write_array_section(fh, 'Site constraints', obj.sample_constraints, include_section_separator=False) def _write_vector_section(fh, header_id, vector): if vector is None: shape = 0 else: shape = vector.shape[0] fh.write("%s\t%d\n" % (header_id, shape)) if vector is not None: fh.write(_format_vector(vector.values)) fh.write("\n") def _write_array_section(fh, header_id, data, has_ids=True, include_section_separator=True): # write section header if data is None: shape = (0, 0) else: shape = data.shape fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1])) # write section data if data is not None: if not has_ids: for vals in data.values: fh.write(_format_vector(vals)) else: for id_, vals in zip(data.index, data.values): fh.write(_format_vector(vals, id_)) if include_section_separator: fh.write("\n") def _format_vector(vector, id_=None): formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str)) if id_ is None: return "%s\n" % formatted_vector else: return "%s\t%s\n" % (id_, formatted_vector)
bsd-3-clause
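A hedged round-trip sketch for the 'ordination' format defined above: build a small OrdinationResults (the axis labels and values here are made up), write it, then read it back. Sections left as None are written with zero dimensions, as the format spec allows.

# Sketch: write and re-read a minimal OrdinationResults via the format above.
import io
import pandas as pd
from skbio import OrdinationResults

res = OrdinationResults(
    short_method_name="PCoA",
    long_method_name="Principal Coordinate Analysis",
    eigvals=pd.Series([0.36, 0.18], index=["PC1", "PC2"]),
    samples=pd.DataFrame([[0.71, -3.08], [0.58, -3.00]],
                         index=["Site0", "Site1"], columns=["PC1", "PC2"]),
    proportion_explained=pd.Series([0.66, 0.34], index=["PC1", "PC2"]))

fh = io.StringIO()
res.write(fh, format="ordination")        # uses the writer registered above
fh.seek(0)
roundtrip = OrdinationResults.read(fh, format="ordination")
print(roundtrip.eigvals)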
petebachant/CFT-vectors
cft_vectors.py
1
18584
#!/usr/bin/env python """ This script generates a force and velocity vector diagram for a cross-flow turbine. """ from __future__ import division, print_function import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd from scipy.interpolate import interp1d import seaborn as sns from pxl.styleplot import set_sns import os # Define some colors (some from the Seaborn deep palette) blue = sns.color_palette()[0] green = sns.color_palette()[1] dark_gray = (0.3, 0.3, 0.3) red = sns.color_palette()[2] purple = sns.color_palette()[3] tan = sns.color_palette()[4] light_blue = sns.color_palette()[5] def load_foildata(): """Loads NACA 0020 airfoil data at Re = 2.1 x 10^5.""" Re = 2.1e5 foil = "0020" fname = "NACA {}_T1_Re{:.3f}_M0.00_N9.0.dat".format(foil, Re/1e6) fpath = "data/{}".format(fname) alpha, cl, cd = np.loadtxt(fpath, skiprows=14, unpack=True) if alpha[0] != 0.0: alpha = np.append([0.0], alpha[:-1]) cl = np.append([1e-12], cl[:-1]) cd = np.append(cd[0], cd[:-1]) # Mirror data about 0 degrees AoA since it's a symmetrical foil alpha = np.append(-np.flipud(alpha), alpha) cl = np.append(-np.flipud(cl), cl) cd = np.append(np.flipud(cd), cd) df = pd.DataFrame() df["alpha_deg"] = alpha df["cl"] = cl df["cd"] = cd return df def lookup_foildata(alpha_deg): """Lookup foil characteristics at given angle of attack.""" alpha_deg = np.asarray(alpha_deg) df = load_foildata() df["alpha_rad"] = np.deg2rad(df.alpha_deg) f_cl = interp1d(df.alpha_deg, df.cl, bounds_error=False) f_cd = interp1d(df.alpha_deg, df.cd, bounds_error=False) f_ct = interp1d(df.alpha_deg, df.cl*np.sin(df.alpha_rad) \ - df.cd*np.cos(df.alpha_rad), bounds_error=False) cl, cd, ct = f_cl(alpha_deg), f_cd(alpha_deg), f_ct(alpha_deg) return {"cl": cl, "cd": cd, "ct": ct} def calc_cft_ctorque(tsr=2.0, chord=0.14, R=0.5): """Calculate the geometric torque coefficient for a CFT.""" U_infty = 1.0 omega = tsr*U_infty/R theta_blade_deg = np.arange(0, 721) theta_blade_rad = np.deg2rad(theta_blade_deg) blade_vel_mag = omega*R blade_vel_x = blade_vel_mag*np.cos(theta_blade_rad) blade_vel_y = blade_vel_mag*np.sin(theta_blade_rad) u = U_infty # No induction rel_vel_mag = np.sqrt((blade_vel_x + u)**2 + blade_vel_y**2) rel_vel_x = u + blade_vel_x rel_vel_y = blade_vel_y relvel_dot_bladevel = (blade_vel_x*rel_vel_x + blade_vel_y*rel_vel_y) alpha_rad = np.arccos(relvel_dot_bladevel/(rel_vel_mag*blade_vel_mag)) alpha_rad[theta_blade_deg > 180] *= -1 alpha_deg = np.rad2deg(alpha_rad) foil_coeffs = lookup_foildata(alpha_deg) ctorque = foil_coeffs["ct"]*chord/(2*R)*rel_vel_mag**2/U_infty**2 cdx = -foil_coeffs["cd"]*np.sin(np.pi/2 - alpha_rad + theta_blade_rad) clx = foil_coeffs["cl"]*np.cos(np.pi/2 - alpha_rad - theta_blade_rad) df = pd.DataFrame() df["theta"] = theta_blade_deg df["alpha_deg"] = alpha_deg df["rel_vel_mag"] = rel_vel_mag df["ctorque"] = ctorque df["cdrag"] = clx + cdx return df def mag(v): """ Return magnitude of 2-D vector (input as a tuple, list, or NumPy array). 
""" return np.sqrt(v[0]**2 + v[1]**2) def rotate(v, rad): """Rotate a 2-D vector by rad radians.""" dc, ds = np.cos(rad), np.sin(rad) x, y = v[0], v[1] x, y = dc*x - ds*y, ds*x + dc*y return np.array((x, y)) def gen_naca_points(naca="0020", c=100, npoints=100, tuples=True): """Generate points for a NACA foil.""" x = np.linspace(0, 1, npoints)*c t = float(naca[2:])/100.0 y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \ + 0.2843*(x/c)**3 - 0.1015*(x/c)**4) y = np.append(y, -y[::-1]) x = np.append(x, x[::-1]) if tuples: return np.array([(x0, y0) for x0, y0 in zip(x, y)]) else: return x, y def test_gen_naca_points(): points = gen_naca_points() x = [] y = [] for p in points: x.append(p[0]) y.append(p[1]) fig, ax = plt.subplots() ax.plot(x, y, "o") ax.set_aspect(1) plt.show() def plot_radius(ax, theta_deg=0): """Plot radius at given azimuthal angle.""" r = 0.495 theta_rad = np.deg2rad(theta_deg) x2, y2 = r*np.cos(theta_rad), r*np.sin(theta_rad) ax.plot((0, x2), (0, y2), "gray", linewidth=2) def plot_center(ax, length=0.07, linewidth=1.2): """Plot centermark at origin.""" ax.plot((0, 0), (-length/2, length/2), lw=linewidth, color="black") ax.plot((-length/2, length/2), (0, 0), lw=linewidth, color="black") def make_naca_path(c=0.3, theta_deg=0.0): verts = gen_naca_points(c=c) verts = np.array([rotate(v, -np.pi/2) for v in verts]) verts += (0.5, c/4) theta_rad = np.deg2rad(theta_deg) verts = np.array([rotate(v, theta_rad) for v in verts]) p = matplotlib.path.Path(verts, closed=True) return p def plot_foil(ax, c=0.3, theta_deg=0.0): """Plot the foil shape using a matplotlib patch.""" p = matplotlib.patches.PathPatch(make_naca_path(c, theta_deg), facecolor="gray", linewidth=1, edgecolor="gray") ax.add_patch(p) def plot_blade_path(ax, R=0.5): """Plot blade path as a dashed line.""" p = plt.Circle((0, 0), R, linestyle="dashed", edgecolor="black", facecolor="none", linewidth=1) ax.add_patch(p) def plot_vectors(fig, ax, theta_deg=0.0, tsr=2.0, c=0.3, label=False): """Plot blade velocity, free stream velocity, relative velocity, lift, and drag vectors. 
""" r = 0.5 u_infty = 0.26 theta_deg %= 360 theta_rad = np.deg2rad(theta_deg) blade_xy = r*np.cos(theta_rad), r*np.sin(theta_rad) head_width = 0.04 head_length = 0.11 linewidth = 1.5 # Function for plotting vector labels def plot_label(text, x, y, dx, dy, text_width=0.09, text_height=0.03, sign=-1, dist=1.0/3.0): text_width *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1] text_height *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1] dvec = np.array((dx, dy)) perp_vec = rotate(dvec, np.pi/2) perp_vec /= mag(perp_vec) if theta_deg > 270: diag = text_height else: diag = np.array((text_width, text_height)) # Projection of text diagonal vector onto normal vector proj = np.dot(diag, perp_vec) if sign != -1: proj = 0 # Text is on right side of vector if theta_deg > 180: sign *= -1 dxlab, dylab = perp_vec*(np.abs(proj) + .01)*sign xlab, ylab = x + dx*dist + dxlab, y + dy*dist + dylab ax.text(xlab, ylab, text) # Make blade velocity vector x1, y1 = rotate((0.5, tsr*u_infty), np.deg2rad(theta_deg)) dx, dy = np.array(blade_xy) - np.array((x1, y1)) blade_vel = np.array((dx, dy)) ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length, length_includes_head=True, color=dark_gray, linewidth=linewidth) if label: plot_label(r"$-\omega r$", x1, y1, dx*0.25, dy*0.5) # Make chord line vector x1c, y1c = np.array((x1, y1)) - np.array((dx, dy))*0.5 x2c, y2c = np.array((x1, y1)) + np.array((dx, dy))*2 ax.plot([x1c, x2c], [y1c, y2c], marker=None, color="k", linestyle="-.", zorder=1) # Make free stream velocity vector y1 += u_infty ax.arrow(x1, y1, 0, -u_infty, head_width=head_width, head_length=head_length, length_includes_head=True, color=blue, linewidth=linewidth) u_infty = np.array((0, -u_infty)) if label: dy = -mag(u_infty) plot_label(r"$U_\mathrm{in}$", x1, y1, 0, dy, text_width=0.1) # Make relative velocity vector dx, dy = np.array(blade_xy) - np.array((x1, y1)) rel_vel = u_infty + blade_vel ax.plot((x1, x1 + dx), (y1, y1 + dy), lw=0) ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length, length_includes_head=True, color=tan, linewidth=linewidth) if label: plot_label(r"$U_\mathrm{rel}$", x1, y1, dx, dy, sign=1, text_width=0.11) # Calculate angle between blade vel and rel vel alpha_deg = np.rad2deg(np.arccos(np.dot(blade_vel/mag(blade_vel), rel_vel/mag(rel_vel)))) if theta_deg > 180: alpha_deg *= -1 # Make drag vector drag_amplify = 3.0 data = lookup_foildata(alpha_deg) drag = data["cd"]*mag(rel_vel)**2*drag_amplify if drag < 0.4/drag_amplify: hs = 0.5 else: hs = 1 dx, dy = drag*np.array((dx, dy))/mag((dx, dy)) ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs, head_length=head_length*hs, length_includes_head=True, color=red, linewidth=linewidth) if label: plot_label(r"$F_d$", blade_xy[0], blade_xy[1], dx, dy, sign=-1, dist=0.66) # Make lift vector lift_amplify = 1.5 lift = data["cl"]*mag(rel_vel)**2*lift_amplify dx, dy = rotate((dx, dy), -np.pi/2)/mag((dx, dy))*lift if np.abs(lift) < 0.4/lift_amplify: hs = 0.5 else: hs = 1 ax.plot((blade_xy[0], blade_xy[0] + dx), (blade_xy[1], blade_xy[1] + dy), linewidth=0) ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs, head_length=head_length*hs, length_includes_head=True, color=green, linewidth=linewidth) if label: plot_label(r"$F_l$", blade_xy[0], blade_xy[1], dx, dy, sign=-1, text_width=0.12, text_height=0.02, dist=0.66) # Label radius if label: plot_label("$r$", 0, 0, blade_xy[0], blade_xy[1], text_width=0.04, text_height=0.04) # Label angle of attack if label: ast = 
"simple,head_width={},tail_width={},head_length={}".format( head_width*8, linewidth/16, head_length*8) xy = blade_xy - rel_vel/mag(rel_vel)*0.2 ax.annotate(r"$\alpha$", xy=xy, xycoords="data", xytext=(37.5, 22.5), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.1", color="k")) xy = blade_xy - blade_vel/mag(blade_vel)*0.2 ax.annotate("", xy=xy, xycoords="data", xytext=(-15, -30), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=-0.1", color="k")) # Label azimuthal angle if label: xy = np.array(blade_xy)*0.6 ast = "simple,head_width={},tail_width={},head_length={}".format( head_width*5.5, linewidth/22, head_length*5.5) ax.annotate(r"$\theta$", xy=xy, xycoords="data", xytext=(0.28, 0.12), textcoords="data", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.1", color="k")) ax.annotate("", xy=(0.41, 0), xycoords="data", xytext=(0.333, 0.12), textcoords="data", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=-0.1", color="k")) # Label pitching moment if label: xy = np.array(blade_xy)*1.1 - blade_vel/mag(blade_vel) * c/4 ast = "simple,head_width={},tail_width={},head_length={}".format( head_width*8, linewidth/16, head_length*8) ax.annotate(r"", xy=xy, xycoords="data", xytext=(25, -15), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.6", color="k")) plot_label(r"$M$", xy[0], xy[1], 0.1, 0.1, sign=-1, dist=0.66) return {"u_infty": u_infty, "blade_vel": blade_vel, "rel_vel": rel_vel} def plot_alpha(ax=None, tsr=2.0, theta=None, alpha_ss=None, **kwargs): """Plot angle of attack versus azimuthal angle.""" if theta is not None: theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.alpha_deg, **kwargs) ax.set_ylabel(r"$\alpha$ (degrees)") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) ylim = np.round(df.alpha_deg.max() + 5) ax.set_ylim((-ylim, ylim)) if theta is not None: f = interp1d(df.theta, df.alpha_deg) ax.plot(theta, f(theta), "ok") if alpha_ss is not None: ax.hlines((alpha_ss, -alpha_ss), 0, 360, linestyles="dashed") def plot_rel_vel_mag(ax=None, tsr=2.0, theta=None, **kwargs): """Plot relative velocity magnitude versus azimuthal angle.""" if theta is not None: theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.rel_vel_mag, **kwargs) ax.set_ylabel(r"$|\vec{U}_\mathrm{rel}|$") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) if theta is not None: f = interp1d(df.theta, df.rel_vel_mag) ax.plot(theta, f(theta), "ok") def plot_alpha_relvel_all(tsrs=np.arange(1.5, 6.1, 1.0), save=False): """Plot angle of attack and relative velocity magnitude for a list of TSRs. Figure will have two subplots in a single row. 
""" fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3.0)) cm = plt.cm.get_cmap("Reds") for tsr in tsrs: color = cm(tsr/np.max(tsrs)) plot_alpha(ax=ax1, tsr=tsr, label=r"$\lambda = {}$".format(tsr), color=color) plot_rel_vel_mag(ax=ax2, tsr=tsr, color=color) [a.set_xticks(np.arange(0, 361, 60)) for a in (ax1, ax2)] ax1.legend(loc=(0.17, 1.1), ncol=len(tsrs)) ax1.set_ylim((-45, 45)) ax1.set_yticks(np.arange(-45, 46, 15)) ax2.set_ylabel(r"$|\vec{U}_\mathrm{rel}|/U_\infty$") fig.tight_layout() if save: fig.savefig("figures/alpha_deg_urel_geom.pdf", bbox_inches="tight") def plot_ctorque(ax=None, tsr=2.0, theta=None, **kwargs): """Plot torque coefficient versus azimuthal angle.""" theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.ctorque, **kwargs) ax.set_ylabel("Torque coeff.") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) if theta is not None: f = interp1d(df.theta, df.ctorque) ax.plot(theta, f(theta), "ok") def plot_diagram(fig=None, ax=None, theta_deg=0.0, tsr=2.0, label=False, save=False, axis="on", full_view=True): """Plot full vector diagram.""" if ax is None: fig, ax = plt.subplots(figsize=(6, 6)) plot_blade_path(ax) if label: # Create dashed line for x-axis ax.plot((-0.5, 0.5), (0, 0), linestyle="dashed", color="k", zorder=1) plot_foil(ax, c=0.3, theta_deg=theta_deg) plot_radius(ax, theta_deg) plot_center(ax) plot_vectors(fig, ax, theta_deg, tsr, label=label) # Figure formatting if full_view: ax.set_xlim((-1, 1)) ax.set_ylim((-1, 1)) ax.set_aspect(1) ax.set_xticks([]) ax.set_yticks([]) ax.axis(axis) if save: fig.savefig("figures/cft-vectors.pdf") def plot_all(theta_deg=0.0, tsr=2.0, scale=1.0, full_view=True): """Create diagram and plots of kinematics in a single figure.""" fig = plt.figure(figsize=(7.5*scale, 4.75*scale)) # Draw vector diagram ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=3) plot_diagram(fig, ax1, theta_deg, tsr, axis="on", full_view=full_view) # Plot angle of attack ax2 = plt.subplot2grid((3, 3), (0, 2)) plot_alpha(ax2, tsr=tsr, theta=theta_deg, alpha_ss=18, color=light_blue) # Plot relative velocity magnitude ax3 = plt.subplot2grid((3, 3), (1, 2)) plot_rel_vel_mag(ax3, tsr=tsr, theta=theta_deg, color=tan) # Plot torque coefficient ax4 = plt.subplot2grid((3, 3), (2, 2)) plot_ctorque(ax4, tsr=tsr, theta=theta_deg, color=purple) fig.tight_layout() return fig def make_frame(t): """Make a frame for a movie.""" sec_per_rev = 5.0 deg = t/sec_per_rev*360 return mplfig_to_npimage(plot_all(deg, scale=2.0)) def make_animation(filetype="mp4", fps=30): """Make animation video.""" if not os.path.isdir("videos"): os.mkdir("videos") animation = VideoClip(make_frame, duration=5.0) if "mp4" in filetype.lower(): animation.write_videofile("videos/cft-animation.mp4", fps=fps) elif "gif" in filetype.lower(): animation.write_gif("videos/cft-animation.gif", fps=fps) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Create cross-flow turbine \ vector diagrams.") parser.add_argument("create", choices=["figure", "diagram", "animation"], help="Either create a static figure or animation") parser.add_argument("--angle", type=float, default=60.0, help="Angle (degrees) to create figure") parser.add_argument("--show", action="store_true", default=False) parser.add_argument("--save", "-s", action="store_true", default=False, help="Save figure") args = parser.parse_args() if args.save: if not os.path.isdir("figures"): os.mkdir("figures") if args.create == "diagram": 
set_sns(font_scale=2) plot_diagram(theta_deg=args.angle, label=True, axis="off", save=args.save) elif args.create == "figure": set_sns() plot_alpha_relvel_all(save=args.save) elif args.create == "animation": set_sns(font_scale=2) from moviepy.editor import VideoClip from moviepy.video.io.bindings import mplfig_to_npimage make_animation() if args.show: plt.show()
mit
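A hedged sketch that uses calc_cft_ctorque from the script above to locate the azimuthal angle of peak geometric torque. It assumes the module can be imported as cft_vectors and that the NACA 0020 data file the script loads is present in its data/ directory.

# Sketch: where does the geometric torque coefficient peak at tsr = 2?
from cft_vectors import calc_cft_ctorque

df = calc_cft_ctorque(tsr=2.0, chord=0.14, R=0.5)
peak = df.loc[df.ctorque.idxmax()]
print("Peak ctorque {:.3f} at theta = {:.0f} deg (alpha = {:.1f} deg)".format(
    peak.ctorque, peak.theta, peak.alpha_deg))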
samuel1208/scikit-learn
sklearn/decomposition/truncated_svd.py
199
7744
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA). """ # Author: Lars Buitinck <L.J.Buitinck@uva.nl> # Olivier Grisel <olivier.grisel@ensta.org> # Michael Becker <mike@beckerfuffle.com> # License: 3-clause BSD. import numpy as np import scipy.sparse as sp try: from scipy.sparse.linalg import svds except ImportError: from ..utils.arpack import svds from ..base import BaseEstimator, TransformerMixin from ..utils import check_array, as_float_array, check_random_state from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip from ..utils.sparsefuncs import mean_variance_axis __all__ = ["TruncatedSVD"] class TruncatedSVD(BaseEstimator, TransformerMixin): """Dimensionality reduction using truncated SVD (aka LSA). This transformer performs linear dimensionality reduction by means of truncated singular value decomposition (SVD). It is very similar to PCA, but operates on sample vectors directly, instead of on a covariance matrix. This means it can work with scipy.sparse matrices efficiently. In particular, truncated SVD works on term count/tf-idf matrices as returned by the vectorizers in sklearn.feature_extraction.text. In that context, it is known as latent semantic analysis (LSA). This estimator supports two algorithm: a fast randomized SVD solver, and a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or (X.T * X), whichever is more efficient. Read more in the :ref:`User Guide <LSA>`. Parameters ---------- n_components : int, default = 2 Desired dimensionality of output data. Must be strictly less than the number of features. The default value is useful for visualisation. For LSA, a value of 100 is recommended. algorithm : string, default = "randomized" SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy (scipy.sparse.linalg.svds), or "randomized" for the randomized algorithm due to Halko (2009). n_iter : int, optional Number of iterations for randomized SVD solver. Not used by ARPACK. random_state : int or RandomState, optional (Seed for) pseudo-random number generator. If not given, the numpy.random singleton is used. tol : float, optional Tolerance for ARPACK. 0 means machine precision. Ignored by randomized SVD solver. Attributes ---------- components_ : array, shape (n_components, n_features) explained_variance_ratio_ : array, [n_components] Percentage of variance explained by each of the selected components. explained_variance_ : array, [n_components] The variance of the training samples transformed by a projection to each component. Examples -------- >>> from sklearn.decomposition import TruncatedSVD >>> from sklearn.random_projection import sparse_random_matrix >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42) >>> svd = TruncatedSVD(n_components=5, random_state=42) >>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5, random_state=42, tol=0.0) >>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS [ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...] >>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS 0.27930... See also -------- PCA RandomizedPCA References ---------- Finding structure with randomness: Stochastic algorithms for constructing approximate matrix decompositions Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061 Notes ----- SVD suffers from a problem called "sign indeterminancy", which means the sign of the ``components_`` and the output from transform depend on the algorithm and random state. 
To work around this, fit instances of this class to data once, then keep the instance around to do transformations. """ def __init__(self, n_components=2, algorithm="randomized", n_iter=5, random_state=None, tol=0.): self.algorithm = algorithm self.n_components = n_components self.n_iter = n_iter self.random_state = random_state self.tol = tol def fit(self, X, y=None): """Fit LSI model on training data X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- self : object Returns the transformer object. """ self.fit_transform(X) return self def fit_transform(self, X, y=None): """Fit LSI model to X and perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = as_float_array(X, copy=False) random_state = check_random_state(self.random_state) # If sparse and not csr or csc, convert to csr if sp.issparse(X) and X.getformat() not in ["csr", "csc"]: X = X.tocsr() if self.algorithm == "arpack": U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol) # svds doesn't abide by scipy.linalg.svd/randomized_svd # conventions, so reverse its outputs. Sigma = Sigma[::-1] U, VT = svd_flip(U[:, ::-1], VT[::-1]) elif self.algorithm == "randomized": k = self.n_components n_features = X.shape[1] if k >= n_features: raise ValueError("n_components must be < n_features;" " got %d >= %d" % (k, n_features)) U, Sigma, VT = randomized_svd(X, self.n_components, n_iter=self.n_iter, random_state=random_state) else: raise ValueError("unknown algorithm %r" % self.algorithm) self.components_ = VT # Calculate explained variance & explained variance ratio X_transformed = np.dot(U, np.diag(Sigma)) self.explained_variance_ = exp_var = np.var(X_transformed, axis=0) if sp.issparse(X): _, full_var = mean_variance_axis(X, axis=0) full_var = full_var.sum() else: full_var = np.var(X, axis=0).sum() self.explained_variance_ratio_ = exp_var / full_var return X_transformed def transform(self, X): """Perform dimensionality reduction on X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) New data. Returns ------- X_new : array, shape (n_samples, n_components) Reduced version of X. This will always be a dense array. """ X = check_array(X, accept_sparse='csr') return safe_sparse_dot(X, self.components_.T) def inverse_transform(self, X): """Transform X back to its original space. Returns an array X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data. Returns ------- X_original : array, shape (n_samples, n_features) Note that this is always a dense array. """ X = check_array(X) return np.dot(X, self.components_)
bsd-3-clause
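A hedged sketch of the LSA use case called out in the TruncatedSVD docstring: reduce a sparse tf-idf matrix to a small dense space. The toy corpus is made up purely for illustration.

# Sketch: latent semantic analysis on a tiny tf-idf matrix.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

docs = ["the cat sat on the mat",
        "the dog sat on the log",
        "cats and dogs are pets"]
X = TfidfVectorizer().fit_transform(docs)      # sparse (3, n_terms)

svd = TruncatedSVD(n_components=2, random_state=42)
X_lsa = svd.fit_transform(X)                   # dense (3, 2)
print(X_lsa.shape, svd.explained_variance_ratio_.sum())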
nmartensen/pandas
pandas/tests/indexing/test_callable.py
14
8721
# -*- coding: utf-8 -*- # pylint: disable-msg=W0612,E1101 import numpy as np import pandas as pd import pandas.util.testing as tm class TestIndexingCallable(object): def test_frame_loc_ix_callable(self): # GH 11485 df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'), 'C': [1, 2, 3, 4]}) # iloc cannot use boolean Series (see GH3635) # return bool indexer res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) res = df.loc[lambda x: x.A > 2] tm.assert_frame_equal(res, df.loc[df.A > 2]) res = df.loc[lambda x: x.A > 2, ] tm.assert_frame_equal(res, df.loc[df.A > 2, ]) res = df.loc[lambda x: x.A > 2, ] tm.assert_frame_equal(res, df.loc[df.A > 2, ]) res = df.loc[lambda x: x.B == 'b', :] tm.assert_frame_equal(res, df.loc[df.B == 'b', :]) res = df.loc[lambda x: x.B == 'b', :] tm.assert_frame_equal(res, df.loc[df.B == 'b', :]) res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B'] tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]]) res = df.loc[lambda x: x.A > 2, lambda x: x.columns == 'B'] tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]]) res = df.loc[lambda x: x.A > 2, lambda x: 'B'] tm.assert_series_equal(res, df.loc[df.A > 2, 'B']) res = df.loc[lambda x: x.A > 2, lambda x: 'B'] tm.assert_series_equal(res, df.loc[df.A > 2, 'B']) res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A > 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']]) res = df.loc[lambda x: x.A == 2, lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A == 2, ['A', 'B']]) # scalar res = df.loc[lambda x: 1, lambda x: 'A'] assert res == df.loc[1, 'A'] res = df.loc[lambda x: 1, lambda x: 'A'] assert res == df.loc[1, 'A'] def test_frame_loc_ix_callable_mixture(self): # GH 11485 df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': list('aabb'), 'C': [1, 2, 3, 4]}) res = df.loc[lambda x: x.A > 2, ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[lambda x: x.A > 2, ['A', 'B']] tm.assert_frame_equal(res, df.loc[df.A > 2, ['A', 'B']]) res = df.loc[[2, 3], lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']]) res = df.loc[[2, 3], lambda x: ['A', 'B']] tm.assert_frame_equal(res, df.loc[[2, 3], ['A', 'B']]) res = df.loc[3, lambda x: ['A', 'B']] tm.assert_series_equal(res, df.loc[3, ['A', 'B']]) res = df.loc[3, lambda x: ['A', 'B']] tm.assert_series_equal(res, df.loc[3, ['A', 'B']]) def test_frame_loc_callable(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return label res = df.loc[lambda x: ['A', 'C']] tm.assert_frame_equal(res, df.loc[['A', 'C']]) res = df.loc[lambda x: ['A', 'C'], ] tm.assert_frame_equal(res, df.loc[['A', 'C'], ]) res = df.loc[lambda x: ['A', 'C'], :] tm.assert_frame_equal(res, df.loc[['A', 'C'], :]) res = df.loc[lambda x: ['A', 'C'], lambda x: 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[lambda x: ['A', 'C'], lambda x: ['X']] tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) # mixture res = df.loc[['A', 'C'], lambda x: 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[['A', 'C'], lambda x: ['X']] tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) res = df.loc[lambda x: ['A', 'C'], 'X'] tm.assert_series_equal(res, df.loc[['A', 'C'], 'X']) res = df.loc[lambda x: ['A', 'C'], ['X']] 
tm.assert_frame_equal(res, df.loc[['A', 'C'], ['X']]) def test_frame_loc_callable_setitem(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return label res = df.copy() res.loc[lambda x: ['A', 'C']] = -20 exp = df.copy() exp.loc[['A', 'C']] = -20 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], :] = 20 exp = df.copy() exp.loc[['A', 'C'], :] = 20 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], lambda x: 'X'] = -1 exp = df.copy() exp.loc[['A', 'C'], 'X'] = -1 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], lambda x: ['X']] = [5, 10] exp = df.copy() exp.loc[['A', 'C'], ['X']] = [5, 10] tm.assert_frame_equal(res, exp) # mixture res = df.copy() res.loc[['A', 'C'], lambda x: 'X'] = np.array([-1, -2]) exp = df.copy() exp.loc[['A', 'C'], 'X'] = np.array([-1, -2]) tm.assert_frame_equal(res, exp) res = df.copy() res.loc[['A', 'C'], lambda x: ['X']] = 10 exp = df.copy() exp.loc[['A', 'C'], ['X']] = 10 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], 'X'] = -2 exp = df.copy() exp.loc[['A', 'C'], 'X'] = -2 tm.assert_frame_equal(res, exp) res = df.copy() res.loc[lambda x: ['A', 'C'], ['X']] = -4 exp = df.copy() exp.loc[['A', 'C'], ['X']] = -4 tm.assert_frame_equal(res, exp) def test_frame_iloc_callable(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return location res = df.iloc[lambda x: [1, 3]] tm.assert_frame_equal(res, df.iloc[[1, 3]]) res = df.iloc[lambda x: [1, 3], :] tm.assert_frame_equal(res, df.iloc[[1, 3], :]) res = df.iloc[lambda x: [1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) # mixture res = df.iloc[[1, 3], lambda x: 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[[1, 3], lambda x: [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) res = df.iloc[lambda x: [1, 3], 0] tm.assert_series_equal(res, df.iloc[[1, 3], 0]) res = df.iloc[lambda x: [1, 3], [0]] tm.assert_frame_equal(res, df.iloc[[1, 3], [0]]) def test_frame_iloc_callable_setitem(self): # GH 11485 df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': list('aabb')}, index=list('ABCD')) # return location res = df.copy() res.iloc[lambda x: [1, 3]] = 0 exp = df.copy() exp.iloc[[1, 3]] = 0 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], :] = -1 exp = df.copy() exp.iloc[[1, 3], :] = -1 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: 0] = 5 exp = df.copy() exp.iloc[[1, 3], 0] = 5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], lambda x: [0]] = 25 exp = df.copy() exp.iloc[[1, 3], [0]] = 25 tm.assert_frame_equal(res, exp) # mixture res = df.copy() res.iloc[[1, 3], lambda x: 0] = -3 exp = df.copy() exp.iloc[[1, 3], 0] = -3 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[[1, 3], lambda x: [0]] = -5 exp = df.copy() exp.iloc[[1, 3], [0]] = -5 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], 0] = 10 exp = df.copy() exp.iloc[[1, 3], 0] = 10 tm.assert_frame_equal(res, exp) res = df.copy() res.iloc[lambda x: [1, 3], [0]] = [-5, -5] exp = df.copy() exp.iloc[[1, 3], [0]] = [-5, -5] tm.assert_frame_equal(res, exp)
bsd-3-clause
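A hedged sketch of the behaviour these tests pin down (GH 11485): passing a callable to .loc lets a filter refer to the intermediate result of a method chain without naming it. The toy frame is made up.

# Sketch: callable indexers inside a method chain.
import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3, 4], "B": list("aabb")})

chained = (df.assign(C=df.A * 10)
             .loc[lambda d: d.A > 2, ["A", "C"]])   # filter the chained result
print(chained)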
vybstat/scikit-learn
examples/linear_model/plot_ard.py
248
2622
""" ================================================== Automatic Relevance Determination Regression (ARD) ================================================== Fit regression model with Bayesian Ridge Regression. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. The histogram of the estimated weights is very peaked, as a sparsity-inducing prior is implied on the weights. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import ARDRegression, LinearRegression ############################################################################### # Generating simulated data with Gaussian weights # Parameters of the example np.random.seed(0) n_samples, n_features = 100, 100 # Create Gaussian data X = np.random.randn(n_samples, n_features) # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noite with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the ARD Regression clf = ARDRegression(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot the true weights, the estimated weights and the histogram of the # weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="ARD estimate") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.plot(w, 'g-', label="Ground truth") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc=1) plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
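A hedged follow-up to the ARD example above, assuming clf and w are still in scope after running it in the same session: count how many ARD coefficients are effectively non-zero (the 1e-3 cutoff is an arbitrary illustrative threshold) and compare with the truly relevant weights.

# Sketch: how sparse is the ARD solution?
import numpy as np

n_nonzero = int(np.sum(np.abs(clf.coef_) > 1e-3))   # arbitrary cutoff
print("ARD kept %d of %d coefficients; %d weights are truly non-zero"
      % (n_nonzero, clf.coef_.size, int(np.sum(w != 0))))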
tracierenea/gnuradio
gr-filter/examples/channelize.py
58
7003
#!/usr/bin/env python # # Copyright 2009,2012,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr from gnuradio import blocks from gnuradio import filter import sys, time try: from gnuradio import analog except ImportError: sys.stderr.write("Error: Program requires gr-analog.\n") sys.exit(1) try: import scipy from scipy import fftpack except ImportError: sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n") sys.exit(1) try: import pylab from pylab import mlab except ImportError: sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n") sys.exit(1) class pfb_top_block(gr.top_block): def __init__(self): gr.top_block.__init__(self) self._N = 2000000 # number of samples to use self._fs = 1000 # initial sampling rate self._M = M = 9 # Number of channels to channelize self._ifs = M*self._fs # initial sampling rate # Create a set of taps for the PFB channelizer self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50, attenuation_dB=100, window=filter.firdes.WIN_BLACKMAN_hARRIS) # Calculate the number of taps per channel for our own information tpc = scipy.ceil(float(len(self._taps)) / float(self._M)) print "Number of taps: ", len(self._taps) print "Number of channels: ", self._M print "Taps per channel: ", tpc # Create a set of signals at different frequencies # freqs lists the frequencies of the signals that get stored # in the list "signals", which then get summed together self.signals = list() self.add = blocks.add_cc() freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80] for i in xrange(len(freqs)): f = freqs[i] + (M/2-M+i+1)*self._fs self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1)) self.connect(self.signals[i], (self.add,i)) self.head = blocks.head(gr.sizeof_gr_complex, self._N) # Construct the channelizer filter self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1) # Construct a vector sink for the input signal to the channelizer self.snk_i = blocks.vector_sink_c() # Connect the blocks self.connect(self.add, self.head, self.pfb) self.connect(self.add, self.snk_i) # Use this to play with the channel mapping #self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4]) # Create a vector sink for each of M output channels of the filter and connect it self.snks = list() for i in xrange(self._M): self.snks.append(blocks.vector_sink_c()) self.connect((self.pfb, i), self.snks[i]) def main(): tstart = time.time() tb = pfb_top_block() tb.run() tend = time.time() print "Run time: %f" % (tend - tstart) if 1: fig_in = pylab.figure(1, figsize=(16,9), facecolor="w") fig1 = pylab.figure(2, figsize=(16,9), facecolor="w") fig2 = pylab.figure(3, figsize=(16,9), facecolor="w") Ns = 1000 Ne = 10000 fftlen = 8192 winfunc = scipy.blackman fs = tb._ifs # Plot the input signal on its own figure d = 
tb.snk_i.data()[Ns:Ne] spin_f = fig_in.add_subplot(2, 1, 1) X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs, window = lambda d: d*winfunc(fftlen), scale_by_freq=True) X_in = 10.0*scipy.log10(abs(X)) f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size)) pin_f = spin_f.plot(f_in, X_in, "b") spin_f.set_xlim([min(f_in), max(f_in)+1]) spin_f.set_ylim([-200.0, 50.0]) spin_f.set_title("Input Signal", weight="bold") spin_f.set_xlabel("Frequency (Hz)") spin_f.set_ylabel("Power (dBW)") Ts = 1.0/fs Tmax = len(d)*Ts t_in = scipy.arange(0, Tmax, Ts) x_in = scipy.array(d) spin_t = fig_in.add_subplot(2, 1, 2) pin_t = spin_t.plot(t_in, x_in.real, "b") pin_t = spin_t.plot(t_in, x_in.imag, "r") spin_t.set_xlabel("Time (s)") spin_t.set_ylabel("Amplitude") Ncols = int(scipy.floor(scipy.sqrt(tb._M))) Nrows = int(scipy.floor(tb._M / Ncols)) if(tb._M % Ncols != 0): Nrows += 1 # Plot each of the channels outputs. Frequencies on Figure 2 and # time signals on Figure 3 fs_o = tb._fs Ts_o = 1.0/fs_o Tmax_o = len(d)*Ts_o for i in xrange(len(tb.snks)): # remove issues with the transients at the beginning # also remove some corruption at the end of the stream # this is a bug, probably due to the corner cases d = tb.snks[i].data()[Ns:Ne] sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i) X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o, window = lambda d: d*winfunc(fftlen), scale_by_freq=True) X_o = 10.0*scipy.log10(abs(X)) f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size)) p2_f = sp1_f.plot(f_o, X_o, "b") sp1_f.set_xlim([min(f_o), max(f_o)+1]) sp1_f.set_ylim([-200.0, 50.0]) sp1_f.set_title(("Channel %d" % i), weight="bold") sp1_f.set_xlabel("Frequency (Hz)") sp1_f.set_ylabel("Power (dBW)") x_o = scipy.array(d) t_o = scipy.arange(0, Tmax_o, Ts_o) sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i) p2_o = sp2_o.plot(t_o, x_o.real, "b") p2_o = sp2_o.plot(t_o, x_o.imag, "r") sp2_o.set_xlim([min(t_o), max(t_o)+1]) sp2_o.set_ylim([-2, 2]) sp2_o.set_title(("Channel %d" % i), weight="bold") sp2_o.set_xlabel("Time (s)") sp2_o.set_ylabel("Amplitude") pylab.show() if __name__ == "__main__": try: main() except KeyboardInterrupt: pass
gpl-3.0
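The record above exercises gr-filter's pfb.channelizer_ccf inside a GNU Radio flowgraph. As a rough illustration of what that block computes (a polyphase decomposition of one prototype low-pass filter followed by an M-point DFT across the branches), here is a hedged NumPy-only sketch. It is not the GNU Radio implementation: the commutator/branch ordering, the output channel ordering and the ifft scaling are simplifying assumptions, and input and tap lengths are simply truncated or zero-padded to multiples of M. Feeding it a single complex tone concentrates most of the energy in one output row, which is essentially what the per-channel PSD plots in the script show; which row it lands in depends on the ordering convention.

import numpy as np


def naive_pfb_channelize(x, taps, M):
    """Split complex baseband x (rate fs) into M channels at rate fs/M."""
    taps = np.asarray(taps, dtype=float)
    # Zero-pad the prototype filter so it splits evenly into M branches.
    taps = np.concatenate([taps, np.zeros((-len(taps)) % M)])
    poly = taps.reshape(-1, M).T              # poly[p] == taps[p::M]
    x = np.asarray(x, dtype=complex)
    x = x[: (len(x) // M) * M]
    xp = x.reshape(-1, M).T                   # xp[p] == x[p::M]
    n_out = xp.shape[1]
    # Filter each branch at the low (decimated) rate.
    branches = np.stack([np.convolve(xp[p], poly[p])[:n_out] for p in range(M)])
    # Combining the branches with an M-point inverse DFT separates the channels.
    return np.fft.ifft(branches, axis=0) * M  # shape (M, n_out), one row per channel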
UltronAI/Deep-Learning
Pattern-Recognition/hw2-Feature-Selection/skfeature/example/test_JMI.py
1
1528
import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm

from skfeature.function.information_theoretical_based import JMI


def main():
    # load data
    mat = scipy.io.loadmat('../data/colon.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    n_samples, n_features = X.shape    # number of samples and number of features

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)

    # perform evaluation on classification task
    num_fea = 10    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the index of each feature on the training set
        idx,_,_ = JMI.jmi(X[train], y[train], n_selected_features=num_fea)

        # obtain the dataset on the selected features
        features = X[:, idx[0:num_fea]]

        # train a classification model with the selected features on the training dataset
        clf.fit(features[train], y[train])

        # predict the class labels of test data
        y_predict = clf.predict(features[test])

        # obtain the classification accuracy on the test data
        acc = accuracy_score(y[test], y_predict)
        correct = correct + acc

    # output the average classification accuracy over all 10 folds
    print 'Accuracy:', float(correct)/10

if __name__ == '__main__':
    main()
mit
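The script above relies on skfeature's JMI.jmi, which greedily ranks features by their joint mutual information with the class. A rough standalone sketch of that criterion on already-discretized data follows, using sklearn's mutual_info_score as the MI estimator. It is an illustration of the JMI scoring rule, not the skfeature implementation: the joint-variable encoding assumes integer feature codes below 1000, and ties are broken arbitrarily.

import numpy as np
from sklearn.metrics import mutual_info_score


def greedy_jmi(X_disc, y, k):
    """X_disc: (n_samples, n_features) integer-coded features; returns k indices."""
    n_features = X_disc.shape[1]
    selected = []
    remaining = list(range(n_features))
    while len(selected) < k and remaining:
        def score(j):
            if not selected:
                # First pick: plain mutual information with the label.
                return mutual_info_score(y, X_disc[:, j])
            # JMI criterion: sum over selected s of I((f_j, f_s); y).
            # The joint variable (f_j, f_s) is encoded as one integer code.
            return sum(mutual_info_score(y, X_disc[:, j] * 1000 + X_disc[:, s])
                       for s in selected)
        best = max(remaining, key=score)
        selected.append(best)
        remaining.remove(best)
    return selected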
fyffyt/scikit-learn
sklearn/preprocessing/tests/test_data.py
71
38516
import warnings import numpy as np import numpy.linalg as la from scipy import sparse from distutils.version import LooseVersion from sklearn.utils.testing import assert_almost_equal, clean_warning_registry from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import ignore_warnings from sklearn.utils.sparsefuncs import mean_variance_axis from sklearn.preprocessing.data import _transform_selected from sklearn.preprocessing.data import Binarizer from sklearn.preprocessing.data import KernelCenterer from sklearn.preprocessing.data import Normalizer from sklearn.preprocessing.data import normalize from sklearn.preprocessing.data import OneHotEncoder from sklearn.preprocessing.data import StandardScaler from sklearn.preprocessing.data import scale from sklearn.preprocessing.data import MinMaxScaler from sklearn.preprocessing.data import minmax_scale from sklearn.preprocessing.data import MaxAbsScaler from sklearn.preprocessing.data import maxabs_scale from sklearn.preprocessing.data import RobustScaler from sklearn.preprocessing.data import robust_scale from sklearn.preprocessing.data import add_dummy_feature from sklearn.preprocessing.data import PolynomialFeatures from sklearn.utils.validation import DataConversionWarning from sklearn import datasets iris = datasets.load_iris() def toarray(a): if hasattr(a, "toarray"): a = a.toarray() return a def test_polynomial_features(): # Test Polynomial Features X1 = np.arange(6)[:, np.newaxis] P1 = np.hstack([np.ones_like(X1), X1, X1 ** 2, X1 ** 3]) deg1 = 3 X2 = np.arange(6).reshape((3, 2)) x1 = X2[:, :1] x2 = X2[:, 1:] P2 = np.hstack([x1 ** 0 * x2 ** 0, x1 ** 1 * x2 ** 0, x1 ** 0 * x2 ** 1, x1 ** 2 * x2 ** 0, x1 ** 1 * x2 ** 1, x1 ** 0 * x2 ** 2]) deg2 = 2 for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]: P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X) assert_array_almost_equal(P_test, P) P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X) assert_array_almost_equal(P_test, P[:, 1:]) interact = PolynomialFeatures(2, interaction_only=True, include_bias=True) X_poly = interact.fit_transform(X) assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]]) @ignore_warnings def test_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] 
scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=False) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X_scaled = scale(X) assert_array_almost_equal(X_scaled.mean(axis=0), 0.0) assert_array_almost_equal(X_scaled.std(axis=0), 1.0) X = np.ones(5) assert_array_equal(scale(X, with_mean=False), X) def test_standard_scaler_numerical_stability(): """Test numerical stability of scaling""" # np.log(1e-5) is taken because of its floating point representation # was empirically found to cause numerical problems with np.mean & np.std. x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64) if LooseVersion(np.__version__) >= LooseVersion('1.9'): # This does not raise a warning as the number of samples is too low # to trigger the problem in recent numpy x_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(scale(x), np.zeros(8)) else: w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(8)) # with 2 more samples, the std computation run into numerical issues: x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64) w = "standard deviation of the data is probably very close to 0" x_scaled = assert_warns_message(UserWarning, w, scale, x) assert_array_almost_equal(x_scaled, np.zeros(10)) x = np.ones(10, dtype=np.float64) * 1e-100 x_small_scaled = assert_no_warnings(scale, x) assert_array_almost_equal(x_small_scaled, np.zeros(10)) # Large values can cause (often recoverable) numerical stability issues: x_big = np.ones(10, dtype=np.float64) * 1e100 w = "Dataset may contain too large values" x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big) assert_array_almost_equal(x_big_scaled, np.zeros(10)) assert_array_almost_equal(x_big_scaled, x_small_scaled) x_big_centered = assert_warns_message(UserWarning, w, scale, x_big, with_std=False) assert_array_almost_equal(x_big_centered, np.zeros(10)) assert_array_almost_equal(x_big_centered, x_small_scaled) def test_scaler_2d_arrays(): # Test scaling of 2d array along first axis rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has been copied assert_true(X_scaled is not X) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_scaled = scale(X, axis=1, with_std=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) X_scaled = scale(X, axis=1, with_std=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=1), 4 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=1), 4 * [1.0]) # Check that the data hasn't been modified assert_true(X_scaled is not X) X_scaled = scaler.fit(X).transform(X, copy=False) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is X) X = rng.randn(4, 5) X[:, 0] = 1.0 # first feature is a constant, non 
zero feature scaler = StandardScaler() X_scaled = scaler.fit(X).transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) assert_array_almost_equal(X_scaled.mean(axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) def test_min_max_scaler_iris(): X = iris.data scaler = MinMaxScaler() # default params X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.min(axis=0), 0) assert_array_almost_equal(X_trans.max(axis=0), 1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # not default params: min=1, max=2 scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), 1) assert_array_almost_equal(X_trans.max(axis=0), 2) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # min=-.5, max=.6 scaler = MinMaxScaler(feature_range=(-.5, .6)) X_trans = scaler.fit_transform(X) assert_array_almost_equal(X_trans.min(axis=0), -.5) assert_array_almost_equal(X_trans.max(axis=0), .6) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # raises on invalid range scaler = MinMaxScaler(feature_range=(2, 1)) assert_raises(ValueError, scaler.fit, X) def test_min_max_scaler_zero_variance_features(): # Check min max scaler on toy data with zero variance features X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] # default params scaler = MinMaxScaler() X_trans = scaler.fit_transform(X) X_expected_0_1 = [[0., 0., 0.5], [0., 0., 0.0], [0., 0., 1.0]] assert_array_almost_equal(X_trans, X_expected_0_1) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) X_trans_new = scaler.transform(X_new) X_expected_0_1_new = [[+0., 1., 0.500], [-1., 0., 0.083], [+0., 0., 1.333]] assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2) # not default params scaler = MinMaxScaler(feature_range=(1, 2)) X_trans = scaler.fit_transform(X) X_expected_1_2 = [[1., 1., 1.5], [1., 1., 1.0], [1., 1., 2.0]] assert_array_almost_equal(X_trans, X_expected_1_2) # function interface X_trans = minmax_scale(X) assert_array_almost_equal(X_trans, X_expected_0_1) X_trans = minmax_scale(X, feature_range=(1, 2)) assert_array_almost_equal(X_trans, X_expected_1_2) def test_minmax_scale_axis1(): X = iris.data X_trans = minmax_scale(X, axis=1) assert_array_almost_equal(np.min(X_trans, axis=1), 0) assert_array_almost_equal(np.max(X_trans, axis=1), 1) @ignore_warnings def test_min_max_scaler_1d(): # Test scaling of dataset along single axis rng = np.random.RandomState(0) X = rng.randn(5) X_orig_copy = X.copy() scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # check inverse transform X_scaled_back = scaler.inverse_transform(X_scaled) assert_array_almost_equal(X_scaled_back, X_orig_copy) # Test with 1D list X = [0., 1., 2, 0.4, 1.] scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(X_scaled.min(axis=0), 0.0) assert_array_almost_equal(X_scaled.max(axis=0), 1.0) # Constant feature. X = np.zeros(5) scaler = MinMaxScaler() X_scaled = scaler.fit(X).transform(X) assert_greater_equal(X_scaled.min(), 0.) assert_less_equal(X_scaled.max(), 1.) 
def test_scaler_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) assert_raises(ValueError, StandardScaler().fit, X_csr) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_int(): # test that scaler converts integer input to floating # for both sparse and dense matrices rng = np.random.RandomState(42) X = rng.randint(20, size=(4, 5)) X[:, 0] = 0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) null_transform = StandardScaler(with_mean=False, with_std=False, copy=True) clean_warning_registry() with warnings.catch_warnings(record=True): X_null = null_transform.fit_transform(X_csr) assert_array_equal(X_null.data, X_csr.data) X_orig = null_transform.inverse_transform(X_null) assert_array_equal(X_orig.data, X_csr.data) clean_warning_registry() with warnings.catch_warnings(record=True): scaler = StandardScaler(with_mean=False).fit(X) X_scaled = scaler.transform(X, copy=True) assert_false(np.any(np.isnan(X_scaled))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csr = StandardScaler(with_mean=False).fit(X_csr) X_csr_scaled = scaler_csr.transform(X_csr, copy=True) assert_false(np.any(np.isnan(X_csr_scaled.data))) clean_warning_registry() with warnings.catch_warnings(record=True): scaler_csc = StandardScaler(with_mean=False).fit(X_csc) X_csc_scaled = scaler_csr.transform(X_csc, copy=True) assert_false(np.any(np.isnan(X_csc_scaled.data))) assert_equal(scaler.mean_, 
scaler_csr.mean_) assert_array_almost_equal(scaler.std_, scaler_csr.std_) assert_equal(scaler.mean_, scaler_csc.mean_) assert_array_almost_equal(scaler.std_, scaler_csc.std_) assert_array_almost_equal( X_scaled.mean(axis=0), [0., 1.109, 1.856, 21., 1.559], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis( X_csr_scaled.astype(np.float), 0) assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) # Check that X has not been modified (copy) assert_true(X_scaled is not X) assert_true(X_csr_scaled is not X_csr) X_scaled_back = scaler.inverse_transform(X_scaled) assert_true(X_scaled_back is not X) assert_true(X_scaled_back is not X_scaled) assert_array_almost_equal(X_scaled_back, X) X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled) assert_true(X_csr_scaled_back is not X_csr) assert_true(X_csr_scaled_back is not X_csr_scaled) assert_array_almost_equal(X_csr_scaled_back.toarray(), X) X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc()) assert_true(X_csc_scaled_back is not X_csc) assert_true(X_csc_scaled_back is not X_csc_scaled) assert_array_almost_equal(X_csc_scaled_back.toarray(), X) def test_scaler_without_copy(): # Check that StandardScaler.fit does not change input rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_copy = X.copy() StandardScaler(copy=False).fit(X) assert_array_equal(X, X_copy) X_csr_copy = X_csr.copy() StandardScaler(with_mean=False, copy=False).fit(X_csr) assert_array_equal(X_csr.toarray(), X_csr_copy.toarray()) def test_scale_sparse_with_mean_raise_exception(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X_csr = sparse.csr_matrix(X) # check scaling and fit with direct calls on sparse data assert_raises(ValueError, scale, X_csr, with_mean=True) assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr) # check transform and inverse_transform after a fit on a dense array scaler = StandardScaler(with_mean=True).fit(X) assert_raises(ValueError, scaler.transform, X_csr) X_transformed_csr = sparse.csr_matrix(scaler.transform(X)) assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr) def test_scale_input_finiteness_validation(): # Check if non finite inputs raise ValueError X = [np.nan, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) X = [np.inf, 5, 6, 7, 8] assert_raises_regex(ValueError, "Input contains NaN, infinity or a value too large", scale, X) def test_scale_function_without_centering(): rng = np.random.RandomState(42) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero X_csr = sparse.csr_matrix(X) X_scaled = scale(X, with_mean=False) assert_false(np.any(np.isnan(X_scaled))) X_csr_scaled = scale(X_csr, with_mean=False) assert_false(np.any(np.isnan(X_csr_scaled.data))) # test csc has same outcome X_csc_scaled = scale(X_csr.tocsc(), with_mean=False) assert_array_almost_equal(X_scaled, X_csc_scaled.toarray()) # raises value error on axis != 0 assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1) assert_array_almost_equal(X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2) assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.]) # Check that X has not been copied assert_true(X_scaled is not X) X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0) 
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0)) assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0)) def test_robust_scaler_2d_arrays(): """Test robust scaling of 2d array along first axis""" rng = np.random.RandomState(0) X = rng.randn(4, 5) X[:, 0] = 0.0 # first feature is always of zero scaler = RobustScaler() X_scaled = scaler.fit(X).transform(X) assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0]) assert_array_almost_equal(X_scaled.std(axis=0)[0], 0) def test_robust_scaler_iris(): X = iris.data scaler = RobustScaler() X_trans = scaler.fit_transform(X) assert_array_almost_equal(np.median(X_trans, axis=0), 0) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) q = np.percentile(X_trans, q=(25, 75), axis=0) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scale_axis1(): X = iris.data X_trans = robust_scale(X, axis=1) assert_array_almost_equal(np.median(X_trans, axis=1), 0) q = np.percentile(X_trans, q=(25, 75), axis=1) iqr = q[1] - q[0] assert_array_almost_equal(iqr, 1) def test_robust_scaler_zero_variance_features(): """Check RobustScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.1], [0., 1., +1.1]] scaler = RobustScaler() X_trans = scaler.fit_transform(X) # NOTE: for such a small sample size, what we expect in the third column # depends HEAVILY on the method used to calculate quantiles. The values # here were calculated to fit the quantiles produces by np.percentile # using numpy 1.9 Calculating quantiles with # scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles # would yield very different results! X_expected = [[0., 0., +0.0], [0., 0., -1.0], [0., 0., +1.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 1., +0.], [-1., 0., -0.83333], [+0., 0., +1.66667]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3) def test_maxabs_scaler_zero_variance_features(): """Check MaxAbsScaler on toy data with zero variance features""" X = [[0., 1., +0.5], [0., 1., -0.3], [0., 1., +1.5], [0., 0., +0.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv) # make sure new data gets transformed correctly X_new = [[+0., 2., 0.5], [-1., 1., 0.0], [+0., 1., 1.5]] X_trans_new = scaler.transform(X_new) X_expected_new = [[+0., 2.0, 1.0 / 3.0], [-1., 1.0, 0.0], [+0., 1.0, 1.0]] assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2) # sparse data X_csr = sparse.csr_matrix(X) X_trans = scaler.fit_transform(X_csr) X_expected = [[0., 1., 1.0 / 3.0], [0., 1., -0.2], [0., 1., 1.0], [0., 0., 0.0]] assert_array_almost_equal(X_trans.A, X_expected) X_trans_inv = scaler.inverse_transform(X_trans) assert_array_almost_equal(X, X_trans_inv.A) def test_maxabs_scaler_large_negative_value(): """Check MaxAbsScaler on toy data with a large negative value""" X = [[0., 1., +0.5, -1.0], [0., 1., -0.3, -0.5], [0., 1., -100.0, 0.0], [0., 0., +0.0, -2.0]] scaler = MaxAbsScaler() X_trans = scaler.fit_transform(X) X_expected = [[0., 1., 0.005, -0.5], [0., 1., -0.003, -0.25], [0., 1., -1.0, 
0.0], [0., 0., 0.0, -1.0]] assert_array_almost_equal(X_trans, X_expected) def test_warning_scaling_integers(): # Check warning when scaling integer data X = np.array([[1, 2, 0], [0, 0, 0]], dtype=np.uint8) w = "Data with input dtype uint8 was converted to float64" clean_warning_registry() assert_warns_message(DataConversionWarning, w, scale, X) assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X) assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X) def test_normalizer_l1(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l1', copy=True) X_norm = normalizer.transform(X) assert_true(X_norm is not X) X_norm1 = toarray(X_norm) normalizer = Normalizer(norm='l1', copy=False) X_norm = normalizer.transform(X) assert_true(X_norm is X) X_norm2 = toarray(X_norm) for X_norm in (X_norm1, X_norm2): row_sums = np.abs(X_norm).sum(axis=1) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(row_sums[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_sums[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_l2(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='l2', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='l2', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(la.norm(X_norm[i]), 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalizer_max(): rng = np.random.RandomState(0) X_dense = rng.randn(4, 5) X_sparse_unpruned = sparse.csr_matrix(X_dense) # set the row number 3 to zero X_dense[3, :] = 0.0 # set the row number 
3 to zero without pruning (can happen in real life) indptr_3 = X_sparse_unpruned.indptr[3] indptr_4 = X_sparse_unpruned.indptr[4] X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0 # build the pruned variant using the regular constructor X_sparse_pruned = sparse.csr_matrix(X_dense) # check inputs that support the no-copy optim for X in (X_dense, X_sparse_pruned, X_sparse_unpruned): normalizer = Normalizer(norm='max', copy=True) X_norm1 = normalizer.transform(X) assert_true(X_norm1 is not X) X_norm1 = toarray(X_norm1) normalizer = Normalizer(norm='max', copy=False) X_norm2 = normalizer.transform(X) assert_true(X_norm2 is X) X_norm2 = toarray(X_norm2) for X_norm in (X_norm1, X_norm2): row_maxs = X_norm.max(axis=1) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(row_maxs[3], 0.0) # check input for which copy=False won't prevent a copy for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix): X = init(X_dense) X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X) assert_true(X_norm is not X) assert_true(isinstance(X_norm, sparse.csr_matrix)) X_norm = toarray(X_norm) for i in range(3): assert_almost_equal(row_maxs[i], 1.0) assert_almost_equal(la.norm(X_norm[3]), 0.0) def test_normalize(): # Test normalize function # Only tests functionality not used by the tests for Normalizer. X = np.random.RandomState(37).randn(3, 2) assert_array_equal(normalize(X, copy=False), normalize(X.T, axis=0, copy=False).T) assert_raises(ValueError, normalize, [[0]], axis=2) assert_raises(ValueError, normalize, [[0]], norm='l3') def test_binarizer(): X_ = np.array([[1, 0, 5], [2, 3, -1]]) for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix): X = init(X_.copy()) binarizer = Binarizer(threshold=2.0, copy=True) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 4) assert_equal(np.sum(X_bin == 1), 2) X_bin = binarizer.transform(X) assert_equal(sparse.issparse(X), sparse.issparse(X_bin)) binarizer = Binarizer(copy=True).fit(X) X_bin = toarray(binarizer.transform(X)) assert_true(X_bin is not X) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=True) X_bin = binarizer.transform(X) assert_true(X_bin is not X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(copy=False) X_bin = binarizer.transform(X) if init is not list: assert_true(X_bin is X) X_bin = toarray(X_bin) assert_equal(np.sum(X_bin == 0), 2) assert_equal(np.sum(X_bin == 1), 4) binarizer = Binarizer(threshold=-0.5, copy=True) for init in (np.array, list): X = init(X_.copy()) X_bin = toarray(binarizer.transform(X)) assert_equal(np.sum(X_bin == 0), 1) assert_equal(np.sum(X_bin == 1), 5) X_bin = binarizer.transform(X) # Cannot use threshold < 0 for sparse assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X)) def test_center_kernel(): # Test that KernelCenterer is equivalent to StandardScaler # in feature space rng = np.random.RandomState(0) X_fit = rng.random_sample((5, 4)) scaler = StandardScaler(with_std=False) scaler.fit(X_fit) X_fit_centered = scaler.transform(X_fit) K_fit = np.dot(X_fit, X_fit.T) # center fit time matrix centerer = KernelCenterer() K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T) K_fit_centered2 = centerer.fit_transform(K_fit) assert_array_almost_equal(K_fit_centered, K_fit_centered2) # center predict time matrix X_pred = rng.random_sample((2, 4)) K_pred = np.dot(X_pred, X_fit.T) X_pred_centered = scaler.transform(X_pred) 
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T) K_pred_centered2 = centerer.transform(K_pred) assert_array_almost_equal(K_pred_centered, K_pred_centered2) def test_fit_transform(): rng = np.random.RandomState(0) X = rng.random_sample((5, 4)) for obj in ((StandardScaler(), Normalizer(), Binarizer())): X_transformed = obj.fit(X).transform(X) X_transformed2 = obj.fit_transform(X) assert_array_equal(X_transformed, X_transformed2) def test_add_dummy_feature(): X = [[1, 0], [0, 1], [0, 1]] X = add_dummy_feature(X) assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_coo(): X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_coo(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csc(): X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csc(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_add_dummy_feature_csr(): X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]]) X = add_dummy_feature(X) assert_true(sparse.isspmatrix_csr(X), X) assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]]) def test_one_hot_encoder_sparse(): # Test OneHotEncoder's fit and transform. X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder() # discover max values automatically X_trans = enc.fit_transform(X).toarray() assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, [[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]]) # max value given as 3 enc = OneHotEncoder(n_values=4) X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 4 * 3)) assert_array_equal(enc.feature_indices_, [0, 4, 8, 12]) # max value given per feature enc = OneHotEncoder(n_values=[3, 2, 2]) X = [[1, 0, 1], [0, 1, 1]] X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 3 + 2 + 2)) assert_array_equal(enc.n_values_, [3, 2, 2]) # check that testing with larger feature works: X = np.array([[2, 0, 1], [0, 1, 1]]) enc.transform(X) # test that an error is raised when out of bounds: X_too_large = [[0, 2, 1], [0, 1, 1]] assert_raises(ValueError, enc.transform, X_too_large) assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X) # test that error is raised when wrong number of features assert_raises(ValueError, enc.transform, X[:, :-1]) # test that error is raised when wrong number of features in fit # with prespecified n_values assert_raises(ValueError, enc.fit, X[:, :-1]) # test exception on wrong init param assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X) enc = OneHotEncoder() # test negative input to fit assert_raises(ValueError, enc.fit, [[0], [-1]]) # test negative input to transform enc.fit([[0], [1]]) assert_raises(ValueError, enc.transform, [[0], [-1]]) def test_one_hot_encoder_dense(): # check for sparse=False X = [[3, 2, 1], [0, 1, 1]] enc = OneHotEncoder(sparse=False) # discover max values automatically X_trans = enc.fit_transform(X) assert_equal(X_trans.shape, (2, 5)) assert_array_equal(enc.active_features_, np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0]) assert_array_equal(enc.feature_indices_, [0, 4, 7, 9]) # check outcome assert_array_equal(X_trans, np.array([[0., 1., 0., 1., 1.], [1., 0., 1., 0., 1.]])) def _check_transform_selected(X, X_expected, sel): for M in (X, sparse.csr_matrix(X)): Xtr = _transform_selected(M, 
Binarizer().transform, sel) assert_array_equal(toarray(Xtr), X_expected) def test_transform_selected(): X = [[3, 2, 1], [0, 1, 1]] X_expected = [[1, 2, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0]) _check_transform_selected(X, X_expected, [True, False, False]) X_expected = [[1, 1, 1], [0, 1, 1]] _check_transform_selected(X, X_expected, [0, 1, 2]) _check_transform_selected(X, X_expected, [True, True, True]) _check_transform_selected(X, X_expected, "all") _check_transform_selected(X, X, []) _check_transform_selected(X, X, [False, False, False]) def _run_one_hot(X, X2, cat): enc = OneHotEncoder(categorical_features=cat) Xtr = enc.fit_transform(X) X2tr = enc.transform(X2) return Xtr, X2tr def _check_one_hot(X, X2, cat, n_features): ind = np.where(cat)[0] # With mask A, B = _run_one_hot(X, X2, cat) # With indices C, D = _run_one_hot(X, X2, ind) # Check shape assert_equal(A.shape, (2, n_features)) assert_equal(B.shape, (1, n_features)) assert_equal(C.shape, (2, n_features)) assert_equal(D.shape, (1, n_features)) # Check that mask and indices give the same results assert_array_equal(toarray(A), toarray(C)) assert_array_equal(toarray(B), toarray(D)) def test_one_hot_encoder_categorical_features(): X = np.array([[3, 2, 1], [0, 1, 1]]) X2 = np.array([[1, 1, 1]]) cat = [True, False, False] _check_one_hot(X, X2, cat, 4) # Edge case: all non-categorical cat = [False, False, False] _check_one_hot(X, X2, cat, 3) # Edge case: all categorical cat = [True, True, True] _check_one_hot(X, X2, cat, 5) def test_one_hot_encoder_unknown_transform(): X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]]) y = np.array([[4, 1, 1]]) # Test that one hot encoder raises error for unknown features # present during transform. oh = OneHotEncoder(handle_unknown='error') oh.fit(X) assert_raises(ValueError, oh.transform, y) # Test the ignore option, ignores unknown features. oh = OneHotEncoder(handle_unknown='ignore') oh.fit(X) assert_array_equal( oh.transform(y).toarray(), np.array([[0., 0., 0., 0., 1., 0., 0.]]) ) # Raise error if handle_unknown is neither ignore or error. oh = OneHotEncoder(handle_unknown='42') oh.fit(X) assert_raises(ValueError, oh.transform, y)
bsd-3-clause
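As a self-contained companion sketch to the test module above (written against a recent scikit-learn, where these scalers behave as below; it is not part of the original test file), the core contracts of StandardScaler and MinMaxScaler can be checked directly against a manual NumPy computation:

import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.preprocessing import StandardScaler, MinMaxScaler

rng = np.random.RandomState(0)
X = rng.randn(20, 3)

# StandardScaler: (x - mean) / population std, column by column.
manual_std = (X - X.mean(axis=0)) / X.std(axis=0)
assert_array_almost_equal(StandardScaler().fit_transform(X), manual_std)

# MinMaxScaler with the default (0, 1) range: (x - min) / (max - min).
span = X.max(axis=0) - X.min(axis=0)
assert_array_almost_equal(MinMaxScaler().fit_transform(X),
                          (X - X.min(axis=0)) / span)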
leesavide/pythonista-docs
Documentation/matplotlib/mpl_examples/api/custom_scale_example.py
9
6401
from __future__ import unicode_literals import numpy as np from numpy import ma from matplotlib import scale as mscale from matplotlib import transforms as mtransforms from matplotlib.ticker import Formatter, FixedLocator class MercatorLatitudeScale(mscale.ScaleBase): """ Scales data in range -pi/2 to pi/2 (-90 to 90 degrees) using the system used to scale latitudes in a Mercator projection. The scale function: ln(tan(y) + sec(y)) The inverse scale function: atan(sinh(y)) Since the Mercator scale tends to infinity at +/- 90 degrees, there is user-defined threshold, above and below which nothing will be plotted. This defaults to +/- 85 degrees. source: http://en.wikipedia.org/wiki/Mercator_projection """ # The scale class must have a member ``name`` that defines the # string used to select the scale. For example, # ``gca().set_yscale("mercator")`` would be used to select this # scale. name = 'mercator' def __init__(self, axis, **kwargs): """ Any keyword arguments passed to ``set_xscale`` and ``set_yscale`` will be passed along to the scale's constructor. thresh: The degree above which to crop the data. """ mscale.ScaleBase.__init__(self) thresh = kwargs.pop("thresh", (85 / 180.0) * np.pi) if thresh >= np.pi / 2.0: raise ValueError("thresh must be less than pi/2") self.thresh = thresh def get_transform(self): """ Override this method to return a new instance that does the actual transformation of the data. The MercatorLatitudeTransform class is defined below as a nested class of this one. """ return self.MercatorLatitudeTransform(self.thresh) def set_default_locators_and_formatters(self, axis): """ Override to set up the locators and formatters to use with the scale. This is only required if the scale requires custom locators and formatters. Writing custom locators and formatters is rather outside the scope of this example, but there are many helpful examples in ``ticker.py``. In our case, the Mercator example uses a fixed locator from -90 to 90 degrees and a custom formatter class to put convert the radians to degrees and put a degree symbol after the value:: """ class DegreeFormatter(Formatter): def __call__(self, x, pos=None): # \u00b0 : degree symbol return "%d\u00b0" % ((x / np.pi) * 180.0) deg2rad = np.pi / 180.0 axis.set_major_locator(FixedLocator( np.arange(-90, 90, 10) * deg2rad)) axis.set_major_formatter(DegreeFormatter()) axis.set_minor_formatter(DegreeFormatter()) def limit_range_for_scale(self, vmin, vmax, minpos): """ Override to limit the bounds of the axis to the domain of the transform. In the case of Mercator, the bounds should be limited to the threshold that was passed in. Unlike the autoscaling provided by the tick locators, this range limiting will always be adhered to, whether the axis range is set manually, determined automatically or changed through panning and zooming. """ return max(vmin, -self.thresh), min(vmax, self.thresh) class MercatorLatitudeTransform(mtransforms.Transform): # There are two value members that must be defined. # ``input_dims`` and ``output_dims`` specify number of input # dimensions and output dimensions to the transformation. # These are used by the transformation framework to do some # error checking and prevent incompatible transformations from # being connected together. When defining transforms for a # scale, which are, by definition, separable and have only one # dimension, these members should always be set to 1. 
input_dims = 1 output_dims = 1 is_separable = True def __init__(self, thresh): mtransforms.Transform.__init__(self) self.thresh = thresh def transform_non_affine(self, a): """ This transform takes an Nx1 ``numpy`` array and returns a transformed copy. Since the range of the Mercator scale is limited by the user-specified threshold, the input array must be masked to contain only valid values. ``matplotlib`` will handle masked arrays and remove the out-of-range data from the plot. Importantly, the ``transform`` method *must* return an array that is the same shape as the input array, since these values need to remain synchronized with values in the other dimension. """ masked = ma.masked_where((a < -self.thresh) | (a > self.thresh), a) if masked.mask.any(): return ma.log(np.abs(ma.tan(masked) + 1.0 / ma.cos(masked))) else: return np.log(np.abs(np.tan(a) + 1.0 / np.cos(a))) def inverted(self): """ Override this method so matplotlib knows how to get the inverse transform for this transform. """ return MercatorLatitudeScale.InvertedMercatorLatitudeTransform(self.thresh) class InvertedMercatorLatitudeTransform(mtransforms.Transform): input_dims = 1 output_dims = 1 is_separable = True def __init__(self, thresh): mtransforms.Transform.__init__(self) self.thresh = thresh def transform_non_affine(self, a): return np.arctan(np.sinh(a)) def inverted(self): return MercatorLatitudeScale.MercatorLatitudeTransform(self.thresh) # Now that the Scale class has been defined, it must be registered so # that ``matplotlib`` can find it. mscale.register_scale(MercatorLatitudeScale) if __name__ == '__main__': import matplotlib.pyplot as plt t = np.arange(-180.0, 180.0, 0.1) s = t / 360.0 * np.pi plt.plot(t, s, '-', lw=2) plt.gca().set_yscale('mercator') plt.xlabel('Longitude') plt.ylabel('Latitude') plt.title('Mercator: Projection of the Oppressor') plt.grid(True) plt.show()
apache-2.0
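The docstring above quotes the forward map ln(tan(y) + sec(y)) and the inverse atan(sinh(y)). A quick standalone numerical check (not part of the matplotlib example itself) confirms that the two really do round-trip on the plotted range of roughly (-85, 85) degrees:

import numpy as np

y = np.linspace(-85, 85, 1000) * np.pi / 180.0
# Forward Mercator mapping, written as in the example's transform.
forward = np.log(np.abs(np.tan(y) + 1.0 / np.cos(y)))
# Inverse mapping from the InvertedMercatorLatitudeTransform.
roundtrip = np.arctan(np.sinh(forward))
assert np.allclose(roundtrip, y)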
robcarver17/pysystemtrade
systems/accounts/pandl_calculators/pandl_generic_costs.py
1
3494
import pandas as pd

from systems.accounts.pandl_calculators.pandl_calculation import pandlCalculation, apply_weighting

curve_types = ['gross', 'net', 'costs']
GROSS_CURVE = 'gross'
NET_CURVE = 'net'
COSTS_CURVE = 'costs'


class pandlCalculationWithGenericCosts(pandlCalculation):
    def weight(self, weight: pd.Series):
        weighted_capital = apply_weighting(weight, self.capital)
        weighted_positions = apply_weighting(weight, self.positions)

        return pandlCalculationWithGenericCosts(self.price,
                                                positions = weighted_positions,
                                                fx = self.fx,
                                                capital = weighted_capital,
                                                value_per_point = self.value_per_point,
                                                roundpositions = self.roundpositions,
                                                delayfill = self.delayfill)

    def as_pd_series(self, percent = False, curve_type=NET_CURVE):
        if curve_type==NET_CURVE:
            if percent:
                return self.net_percentage_pandl()
            else:
                return self.net_pandl_in_base_currency()
        elif curve_type==GROSS_CURVE:
            if percent:
                return self.percentage_pandl()
            else:
                return self.pandl_in_base_currency()
        elif curve_type==COSTS_CURVE:
            if percent:
                return self.costs_percentage_pandl()
            else:
                return self.costs_pandl_in_base_currency()
        else:
            raise Exception("Curve type %s not recognised! Must be one of %s" % (curve_type, curve_types))

    def net_percentage_pandl(self) -> pd.Series:
        gross = self.percentage_pandl()
        costs = self.costs_percentage_pandl()
        net = _add_gross_and_costs(gross, costs)

        return net

    def net_pandl_in_base_currency(self) -> pd.Series:
        gross = self.pandl_in_base_currency()
        costs = self.costs_pandl_in_base_currency()
        net = _add_gross_and_costs(gross, costs)

        return net

    def net_pandl_in_instrument_currency(self) -> pd.Series:
        gross = self.pandl_in_instrument_currency()
        costs = self.costs_pandl_in_instrument_currency()
        net = _add_gross_and_costs(gross, costs)

        return net

    def net_pandl_in_points(self) -> pd.Series:
        gross = self.pandl_in_points()
        costs = self.costs_pandl_in_points()
        net = _add_gross_and_costs(gross, costs)

        return net

    def costs_percentage_pandl(self) -> pd.Series:
        costs_in_base = self.costs_pandl_in_base_currency()
        costs = self._percentage_pandl_given_pandl(costs_in_base)

        return costs

    def costs_pandl_in_base_currency(self) -> pd.Series:
        costs_in_instr_ccy = self.costs_pandl_in_instrument_currency()
        costs_in_base = self._base_pandl_given_currency_pandl(costs_in_instr_ccy)

        return costs_in_base

    def costs_pandl_in_instrument_currency(self) -> pd.Series:
        costs_in_points = self.costs_pandl_in_points()
        costs_in_instr_ccy = self._pandl_in_instrument_ccy_given_points_pandl(costs_in_points)

        return costs_in_instr_ccy

    def costs_pandl_in_points(self) -> pd.Series:
        raise NotImplementedError


def _add_gross_and_costs(gross: pd.Series, costs: pd.Series):
    cumsum_costs = costs.cumsum()
    cumsum_costs_aligned = cumsum_costs.reindex(gross.index, method="ffill")
    costs_aligned = cumsum_costs_aligned.diff()

    net = gross + costs_aligned

    return net
gpl-3.0
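The _add_gross_and_costs helper above aligns a sparse costs series onto the gross curve's index by cumulating, forward-filling and differencing before adding. A toy pandas illustration with made-up dates (plain Series, not pysystemtrade account objects) shows the trick; note that, exactly as in the helper, the very first point of the diff comes out NaN.

import pandas as pd

days = pd.date_range("2024-01-01", periods=6, freq="D")        # made-up dates
gross = pd.Series([1.0, -0.5, 0.2, 0.7, -0.1, 0.3], index=days)
costs = pd.Series([-0.05, -0.02], index=[days[0], days[3]])     # sparse cost entries

cum = costs.cumsum().reindex(gross.index, method="ffill")       # running total on the gross index
costs_aligned = cum.diff()        # each cost lands back on the day it occurred; first point is NaN
net = gross + costs_aligned
print(net)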
DamCB/tyssue
tyssue/draw/ipv_draw.py
2
8114
"""3D visualisation inside the notebook. """ import warnings import numpy as np import pandas as pd from matplotlib import cm from ipywidgets import interact from ..config.draw import sheet_spec from ..utils.utils import spec_updater, get_sub_eptm try: import ipyvolume as ipv except ImportError: print( """ This module needs ipyvolume to work. You can install it with: $ conda install -c conda-forge ipyvolume """ ) def browse_history(history, coords=["x", "y", "z"], **draw_specs_kw): times = history.time_stamps num_frames = times.size draw_specs = sheet_spec() spec_updater(draw_specs, draw_specs_kw) sheet = history.retrieve(0) ipv.clear() fig, meshes = sheet_view(sheet, coords, **draw_specs_kw) lim_inf = sheet.vert_df[sheet.coords].min().min() lim_sup = sheet.vert_df[sheet.coords].max().max() ipv.xyzlim(lim_inf, lim_sup) def set_frame(i=0): fig.animation = 0 t = times[i] meshes = _get_meshes(history.retrieve(t), coords, draw_specs) update_view(fig, meshes) ipv.show() interact(set_frame, i=(0, num_frames - 1)) def update_view(fig, meshes): for old, new in zip(fig.meshes, meshes): old.x = new.x old.y = new.y old.z = new.z old.color = new.color old.triangles = new.triangles old.lines = new.lines def sheet_view(sheet, coords=["x", "y", "z"], **draw_specs_kw): """ Creates a javascript renderer of the edge lines to be displayed in Jupyter Notebooks Returns ------- fig: a :class:`ipyvolume.widgets.Figure` widget mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget """ # ipv.style.use(["dark", "minimal"]) draw_specs = sheet_spec() spec_updater(draw_specs, draw_specs_kw) fig = ipv.gcf() fig.meshes = fig.meshes + _get_meshes(sheet, coords, draw_specs) box_size = max(*(np.ptp(sheet.vert_df[u]) for u in sheet.coords)) border = 0.05 * box_size lim_inf = sheet.vert_df[sheet.coords].min().min() - border lim_sup = sheet.vert_df[sheet.coords].max().max() + border ipv.xyzlim(lim_inf, lim_sup) return fig, fig.meshes def view_ipv(sheet, coords=["x", "y", "z"], **edge_specs): """ Creates a javascript renderer of the edge lines to be displayed in Jupyter Notebooks Returns ------- fig: a :class:`ipyvolume.widgets.Figure` widget mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget """ warnings.warn("`view_ipv` is deprecated, use the more generic `sheet_view`") mesh = edge_mesh(sheet, coords, **edge_specs) fig = ipv.gcf() fig.meshes = fig.meshes + [mesh] box_size = max(*(np.ptp(sheet.vert_df[u]) for u in sheet.coords)) border = 0.05 * box_size lim_inf = sheet.vert_df[sheet.coords].min().min() - border lim_sup = sheet.vert_df[sheet.coords].max().max() + border ipv.xyzlim(lim_inf, lim_sup) return fig, mesh def edge_mesh(sheet, coords, **edge_specs): """ Creates a ipyvolume Mesh of the edge lines to be displayed in Jupyter Notebooks Returns ------- mesh: a :class:`ipyvolume.widgets.Mesh` mesh widget """ spec = sheet_spec()["edge"] spec.update(**edge_specs) if callable(spec["color"]): spec["color"] = spec["color"](sheet) if isinstance(spec["color"], str): color = spec["color"] elif hasattr(spec["color"], "__len__"): color = _wire_color_from_sequence(spec, sheet)[:, :3] u, v, w = coords mesh = ipv.Mesh( x=sheet.vert_df[u], y=sheet.vert_df[v], z=sheet.vert_df[w], lines=sheet.edge_df[["srce", "trgt"]].astype(dtype=np.uint32), color=color, ) return mesh def face_mesh(sheet, coords, **face_draw_specs): """ Creates a ipyvolume Mesh of the face polygons """ Ne, Nf = sheet.Ne, sheet.Nf if callable(face_draw_specs["color"]): face_draw_specs["color"] = face_draw_specs["color"](sheet) if isinstance(face_draw_specs["color"], str): 
color = face_draw_specs["color"] elif hasattr(face_draw_specs["color"], "__len__"): color = _face_color_from_sequence(face_draw_specs, sheet)[:, :3] if "visible" in sheet.face_df.columns: edges = sheet.edge_df[sheet.upcast_face(sheet.face_df["visible"])].index _sheet = get_sub_eptm(sheet, edges) if _sheet is not None: sheet = _sheet if isinstance(color, np.ndarray): faces = sheet.face_df["face_o"].values.astype(np.uint32) edges = edges.values.astype(np.uint32) indexer = np.concatenate([faces, edges + Nf, edges + Ne + Nf]) color = color.take(indexer, axis=0) epsilon = face_draw_specs.get("epsilon", 0) up_srce = sheet.edge_df[["s" + c for c in coords]] up_trgt = sheet.edge_df[["t" + c for c in coords]] Ne, Nf = sheet.Ne, sheet.Nf if epsilon > 0: up_face = sheet.edge_df[["f" + c for c in coords]].values up_srce = (up_srce - up_face) * (1 - epsilon) + up_face up_trgt = (up_trgt - up_face) * (1 - epsilon) + up_face mesh_ = np.concatenate( [sheet.face_df[coords].values, up_srce.values, up_trgt.values] ) triangles = np.vstack( [sheet.edge_df["face"], np.arange(Ne) + Nf, np.arange(Ne) + Ne + Nf] ).T.astype(dtype=np.uint32) mesh = ipv.Mesh( x=mesh_[:, 0], y=mesh_[:, 1], z=mesh_[:, 2], triangles=triangles, color=color ) return mesh def _wire_color_from_sequence(edge_spec, sheet): """ """ color_ = edge_spec["color"] cmap = cm.get_cmap(edge_spec.get("colormap", "viridis")) if color_.shape in [(sheet.Nv, 3), (sheet.Nv, 4)]: return np.asarray(color_) if color_.shape == (sheet.Nv,): if np.ptp(color_) < 1e-10: return np.ones((sheet.Nv, 3)) * 0.7 return cmap((color_ - color_.min()) / np.ptp(color_)) if color_.shape in [(sheet.Ne, 3), (sheet.Ne, 4)]: color_ = pd.DataFrame(color_, index=sheet.edge_df.index) color_["srce"] = sheet.edge_df["srce"] color_ = color_.groupby("srce").mean().values return color_ if color_.shape == (sheet.Ne,): color_ = pd.DataFrame(color_, index=sheet.edge_df.index) color_["srce"] = sheet.edge_df["srce"] color_ = color_.groupby("srce").mean().values.ravel() if np.ptp(color_) < 1e-10: warnings.warn("Attempting to draw a colormap " "with a uniform value") return np.ones((sheet.Nv, 3)) * 0.7 return cmap((color_ - color_.min()) / np.ptp(color_)) else: raise ValueError("The 'color' value of the spec doesn't have a correct shape.") def _face_color_from_sequence(face_spec, sheet): color_ = face_spec["color"] cmap = cm.get_cmap(face_spec.get("colormap", "viridis")) Nf, Ne = sheet.Nf, sheet.Ne color_min, color_max = face_spec.get("color_range", (color_.min(), color_.max())) face_mesh_shape = Nf + 2 * Ne if color_.shape in [(sheet.Nf, 3), (sheet.Nf, 4)]: return np.concatenate([color_, color_, color_]) elif color_.shape == (sheet.Nf,): if np.ptp(color_) < 1e-10: # warnings.warn("Attempting to draw a colormap with a uniform value") return np.ones((face_mesh_shape, 3)) * 0.5 normed = (color_ - color_min) / (color_max - color_min) up_color = sheet.upcast_face(normed).values return cmap(np.concatenate([normed, up_color, up_color])) else: raise ValueError( "shape of `face_spec['color']` must be either (Nf, 3), (Nf, 4) or (Nf,)" ) def _get_meshes(sheet, coords, draw_specs): meshes = [] edge_spec = draw_specs["edge"] if edge_spec["visible"]: edges = edge_mesh(sheet, coords, **edge_spec) meshes.append(edges) else: edges = None face_spec = draw_specs["face"] if face_spec["visible"]: faces = face_mesh(sheet, coords, **face_spec) meshes.append(faces) else: faces = None return meshes
gpl-3.0
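In _wire_color_from_sequence above, a per-edge scalar is turned into per-vertex colours by grouping edges on their source vertex, averaging, normalising, and pushing the result through a colormap. A toy pandas/matplotlib sketch of just that step follows; the edge table is made up and this is not a tyssue Sheet, only the colour-averaging idea.

import numpy as np
import pandas as pd
from matplotlib import cm

# Three vertices (0, 1, 2) and a scalar value attached to each edge.
edge_df = pd.DataFrame({"srce": [0, 0, 1, 2, 2, 2],
                        "value": [1.0, 3.0, 2.0, 0.0, 4.0, 8.0]})

per_vertex = edge_df.groupby("srce")["value"].mean()             # -> 2.0, 2.0, 4.0
normed = (per_vertex - per_vertex.min()) / np.ptp(per_vertex.values)
colors = cm.get_cmap("viridis")(normed.values)                   # (Nv, 4) RGBA
print(colors[:, :3])                                             # keep RGB, as the module does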
matthew-tucker/mne-python
examples/time_frequency/plot_source_power_spectrum.py
19
1929
""" ========================================================= Compute power spectrum densities of the sources with dSPM ========================================================= Returns an STC file containing the PSD (in dB) of each of the sources. """ # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, compute_source_psd print(__doc__) ############################################################################### # Set parameters data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_label = data_path + '/MEG/sample/labels/Aud-lh.label' # Setup for reading the raw data raw = io.Raw(raw_fname, verbose=False) events = mne.find_events(raw, stim_channel='STI 014') inverse_operator = read_inverse_operator(fname_inv) raw.info['bads'] = ['MEG 2443', 'EEG 053'] # picks MEG gradiometers picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, exclude='bads') tmin, tmax = 0, 120 # use the first 120s of data fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 label = mne.read_label(fname_label) stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM", tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, pick_ori="normal", n_fft=n_fft, label=label) stc.save('psd_dSPM') ############################################################################### # View PSD of sources in label plt.plot(1e3 * stc.times, stc.data.T) plt.xlabel('Frequency (Hz)') plt.ylabel('PSD (dB)') plt.title('Source Power Spectrum (PSD)') plt.show()
bsd-3-clause
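The example above computes a source-space PSD with dSPM over 4-100 Hz. For readers without the MNE sample dataset, a much simpler sensor-space analogue on synthetic data shows what a "PSD in dB" over that band looks like; the 600 Hz sampling rate and the 11 Hz tone are arbitrary choices, and scipy's Welch estimator stands in here for compute_source_psd.

import numpy as np
from scipy import signal

sfreq = 600.0                              # assumed sampling rate (Hz)
t = np.arange(0, 120, 1 / sfreq)           # 120 s of data, as in the example
x = np.sin(2 * np.pi * 11 * t) + 0.5 * np.random.randn(t.size)

freqs, psd = signal.welch(x, fs=sfreq, nperseg=2048)
band = (freqs >= 4) & (freqs <= 100)       # keep the 4-100 Hz band
psd_db = 10 * np.log10(psd[band])
print(freqs[band][np.argmax(psd_db)])      # peak near 11 Hz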
keras-team/keras-io
examples/nlp/semantic_similarity_with_bert.py
1
11604
""" Title: Semantic Similarity with BERT Author: [Mohamad Merchant](https://twitter.com/mohmadmerchant1) Date created: 2020/08/15 Last modified: 2020/08/29 Description: Natural Language Inference by fine-tuning BERT model on SNLI Corpus. """ """ ## Introduction Semantic Similarity is the task of determining how similar two sentences are, in terms of what they mean. This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus to predict sentence semantic similarity with Transformers. We will fine-tune a BERT model that takes two sentences as inputs and that outputs a similarity score for these two sentences. ### References * [BERT](https://arxiv.org/pdf/1810.04805.pdf) * [SNLI](https://nlp.stanford.edu/projects/snli/) """ """ ## Setup Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0). """ import numpy as np import pandas as pd import tensorflow as tf import transformers """ ## Configuration """ max_length = 128 # Maximum length of input sentence to the model. batch_size = 32 epochs = 2 # Labels in our dataset. labels = ["contradiction", "entailment", "neutral"] """ ## Load the Data """ """shell curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz tar -xvzf data.tar.gz """ # There are more than 550k samples in total; we will use 100k for this example. train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000) valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv") test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv") # Shape of the data print(f"Total train samples : {train_df.shape[0]}") print(f"Total validation samples: {valid_df.shape[0]}") print(f"Total test samples: {valid_df.shape[0]}") """ Dataset Overview: - sentence1: The premise caption that was supplied to the author of the pair. - sentence2: The hypothesis caption that was written by the author of the pair. - similarity: This is the label chosen by the majority of annotators. Where no majority exists, the label "-" is used (we will skip such samples here). Here are the "similarity" label values in our dataset: - Contradiction: The sentences share no similarity. - Entailment: The sentences have similar meaning. - Neutral: The sentences are neutral. """ """ Let's look at one sample from the dataset: """ print(f"Sentence1: {train_df.loc[1, 'sentence1']}") print(f"Sentence2: {train_df.loc[1, 'sentence2']}") print(f"Similarity: {train_df.loc[1, 'similarity']}") """ ## Preprocessing """ # We have some NaN entries in our train data, we will simply drop them. print("Number of missing values") print(train_df.isnull().sum()) train_df.dropna(axis=0, inplace=True) """ Distribution of our training targets. """ print("Train Target Distribution") print(train_df.similarity.value_counts()) """ Distribution of our validation targets. """ print("Validation Target Distribution") print(valid_df.similarity.value_counts()) """ The value "-" appears as part of our training and validation targets. We will skip these samples. """ train_df = ( train_df[train_df.similarity != "-"] .sample(frac=1.0, random_state=42) .reset_index(drop=True) ) valid_df = ( valid_df[valid_df.similarity != "-"] .sample(frac=1.0, random_state=42) .reset_index(drop=True) ) """ One-hot encode training, validation, and test labels. 
""" train_df["label"] = train_df["similarity"].apply( lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2 ) y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3) valid_df["label"] = valid_df["similarity"].apply( lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2 ) y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3) test_df["label"] = test_df["similarity"].apply( lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2 ) y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3) """ ## Create a custom data generator """ class BertSemanticDataGenerator(tf.keras.utils.Sequence): """Generates batches of data. Args: sentence_pairs: Array of premise and hypothesis input sentences. labels: Array of labels. batch_size: Integer batch size. shuffle: boolean, whether to shuffle the data. include_targets: boolean, whether to incude the labels. Returns: Tuples `([input_ids, attention_mask, `token_type_ids], labels)` (or just `[input_ids, attention_mask, `token_type_ids]` if `include_targets=False`) """ def __init__( self, sentence_pairs, labels, batch_size=batch_size, shuffle=True, include_targets=True, ): self.sentence_pairs = sentence_pairs self.labels = labels self.shuffle = shuffle self.batch_size = batch_size self.include_targets = include_targets # Load our BERT Tokenizer to encode the text. # We will use base-base-uncased pretrained model. self.tokenizer = transformers.BertTokenizer.from_pretrained( "bert-base-uncased", do_lower_case=True ) self.indexes = np.arange(len(self.sentence_pairs)) self.on_epoch_end() def __len__(self): # Denotes the number of batches per epoch. return len(self.sentence_pairs) // self.batch_size def __getitem__(self, idx): # Retrieves the batch of index. indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size] sentence_pairs = self.sentence_pairs[indexes] # With BERT tokenizer's batch_encode_plus batch of both the sentences are # encoded together and separated by [SEP] token. encoded = self.tokenizer.batch_encode_plus( sentence_pairs.tolist(), add_special_tokens=True, max_length=max_length, return_attention_mask=True, return_token_type_ids=True, pad_to_max_length=True, return_tensors="tf", ) # Convert batch of encoded features to numpy array. input_ids = np.array(encoded["input_ids"], dtype="int32") attention_masks = np.array(encoded["attention_mask"], dtype="int32") token_type_ids = np.array(encoded["token_type_ids"], dtype="int32") # Set to true if data generator is used for training/validation. if self.include_targets: labels = np.array(self.labels[indexes], dtype="int32") return [input_ids, attention_masks, token_type_ids], labels else: return [input_ids, attention_masks, token_type_ids] def on_epoch_end(self): # Shuffle indexes after each epoch if shuffle is set to True. if self.shuffle: np.random.RandomState(42).shuffle(self.indexes) """ ## Build the model """ # Create the model under a distribution strategy scope. strategy = tf.distribute.MirroredStrategy() with strategy.scope(): # Encoded token ids from BERT tokenizer. input_ids = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="input_ids" ) # Attention masks indicates to the model which tokens should be attended to. attention_masks = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="attention_masks" ) # Token type ids are binary masks identifying different sequences in the model. 
token_type_ids = tf.keras.layers.Input( shape=(max_length,), dtype=tf.int32, name="token_type_ids" ) # Loading pretrained BERT model. bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased") # Freeze the BERT model to reuse the pretrained features without modifying them. bert_model.trainable = False sequence_output, pooled_output = bert_model( input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids ) # Add trainable layers on top of the frozen layers to adapt the pretrained features to the new data. bi_lstm = tf.keras.layers.Bidirectional( tf.keras.layers.LSTM(64, return_sequences=True) )(sequence_output) # Applying hybrid pooling approach to bi_lstm sequence output. avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm) max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm) concat = tf.keras.layers.concatenate([avg_pool, max_pool]) dropout = tf.keras.layers.Dropout(0.3)(concat) output = tf.keras.layers.Dense(3, activation="softmax")(dropout) model = tf.keras.models.Model( inputs=[input_ids, attention_masks, token_type_ids], outputs=output ) model.compile( optimizer=tf.keras.optimizers.Adam(), loss="categorical_crossentropy", metrics=["acc"], ) print(f"Strategy: {strategy}") model.summary() """ Create train and validation data generators """ train_data = BertSemanticDataGenerator( train_df[["sentence1", "sentence2"]].values.astype("str"), y_train, batch_size=batch_size, shuffle=True, ) valid_data = BertSemanticDataGenerator( valid_df[["sentence1", "sentence2"]].values.astype("str"), y_val, batch_size=batch_size, shuffle=False, ) """ ## Train the Model Training is done only for the top layers to perform "feature extraction", which will allow the model to use the representations of the pretrained model. """ history = model.fit( train_data, validation_data=valid_data, epochs=epochs, use_multiprocessing=True, workers=-1, ) """ ## Fine-tuning This step must only be performed after the feature extraction model has been trained to convergence on the new data. This is an optional last step where `bert_model` is unfrozen and retrained with a very low learning rate. This can deliver meaningful improvement by incrementally adapting the pretrained features to the new data. """ # Unfreeze the bert_model. bert_model.trainable = True # Recompile the model to make the change effective. model.compile( optimizer=tf.keras.optimizers.Adam(1e-5), loss="categorical_crossentropy", metrics=["accuracy"], ) model.summary() """ ## Train the entire model end-to-end """ history = model.fit( train_data, validation_data=valid_data, epochs=epochs, use_multiprocessing=True, workers=-1, ) """ ## Evaluate model on the test set """ test_data = BertSemanticDataGenerator( test_df[["sentence1", "sentence2"]].values.astype("str"), y_test, batch_size=batch_size, shuffle=False, ) model.evaluate(test_data, verbose=1) """ ## Inference on custom sentences """ def check_similarity(sentence1, sentence2): sentence_pairs = np.array([[str(sentence1), str(sentence2)]]) test_data = BertSemanticDataGenerator( sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False, ) proba = model.predict(test_data)[0] idx = np.argmax(proba) # Convert the softmax probability to a percentage string. proba = f"{proba[idx] * 100: .2f}%" pred = labels[idx] return pred, proba """ Check results on some example sentence pairs. """ sentence1 = "Two women are observing something together." sentence2 = "Two women are standing with their eyes closed." check_similarity(sentence1, sentence2) """ Check results on some example sentence pairs. 
""" sentence1 = "A smiling costumed woman is holding an umbrella" sentence2 = "A happy woman in a fairy costume holds an umbrella" check_similarity(sentence1, sentence2) """ Check results on some example sentence pairs """ sentence1 = "A soccer game with multiple males playing" sentence2 = "Some men are playing a sport" check_similarity(sentence1, sentence2)
apache-2.0
cbertinato/pandas
pandas/tests/indexes/timedeltas/test_scalar_compat.py
1
2391
""" Tests for TimedeltaIndex methods behaving like their Timedelta counterparts """ import numpy as np import pytest import pandas as pd from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range import pandas.util.testing as tm class TestVectorizedTimedelta: def test_tdi_total_seconds(self): # GH#10939 # test index rng = timedelta_range('1 days, 10:11:12.100123456', periods=2, freq='s') expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9, 1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9] tm.assert_almost_equal(rng.total_seconds(), Index(expt)) # test Series ser = Series(rng) s_expt = Series(expt, index=[0, 1]) tm.assert_series_equal(ser.dt.total_seconds(), s_expt) # with nat ser[1] = np.nan s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9, np.nan], index=[0, 1]) tm.assert_series_equal(ser.dt.total_seconds(), s_expt) # with both nat ser = Series([np.nan, np.nan], dtype='timedelta64[ns]') tm.assert_series_equal(ser.dt.total_seconds(), Series([np.nan, np.nan], index=[0, 1])) def test_tdi_round(self): td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min') elt = td[1] expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'), Timedelta('16801 days 00:00:00'), Timedelta('16801 days 01:00:00'), Timedelta('16801 days 02:00:00'), Timedelta('16801 days 02:00:00')]) expected_elt = expected_rng[1] tm.assert_index_equal(td.round(freq='H'), expected_rng) assert elt.round(freq='H') == expected_elt msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG with pytest.raises(ValueError, match=msg): td.round(freq='foo') with pytest.raises(ValueError, match=msg): elt.round(freq='foo') msg = "<MonthEnd> is a non-fixed frequency" with pytest.raises(ValueError, match=msg): td.round(freq='M') with pytest.raises(ValueError, match=msg): elt.round(freq='M')
bsd-3-clause
mkocka/galaxytea
modeling/domcek/plots.py
1
4294
import matplotlib.pyplot as plt from numpy import * ###List of variables # r_in [10**10 cm] inner radius # r_out [10**10 cm] outer radius # step [10**10 cm] step of plot # alfa [] parameter of accretion # M_16 [10**16 g.s**(-1)] accretion flow # m_1 [solar mass] mass of compact object # R_hv [10**10 cm] radius of compact object # R_10 [10**10 cm] distance from compact object # f numerical factor ###List of computed parameters # Surface density [g.cm**(-2)] (sigma) # Height [cm] (H) # Density [g.cm**(-3)] (rho) # Central disc temperature [K] (T_c) # Opacity [] (tau) # viscosity [cm**2.s**(-1)] (nu) # radial velocity towards center [cm.s**(-1)] (v_r) ###function solutions parameters # parameter 1 r_in # parameter 2 r_out # parameter 3 step # parameter 4 alfa # parameter 5 M_16 # parameter 6 m_1 # parameter 7 R_hv def solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv): #defining lists list_function = arange(r_in,r_out,step) R_10_l,surface_density_l,height_l,density_l,Fx = ([] for i in range(5)) temperature_l,opacity_l,viscosity_l,radial_velocity_l = ([] for i in range(4)) #computation and appending to lists for R_10 in list_function: f=(1-((R_hv)/(R_10))**(1.0/2))**(1.0/4) surface_density = 5.2*alfa**(-4.0/5)*M_16**(7.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(14.0/5) height = 1.7*10**8*alfa**(-1.0/10)*M_16**(3.0/20)*m_1**(-3.0/8)*R_10**(9.0/8)*f**(3.0/5) density = 3.1*10**(-8)*alfa**(-7.0/10)*M_16**(11.0/20)*m_1**(5.0/8)*R_10**(-15.0/8)*f**(11.0/5) temperature = 1.4*10**4*alfa**(-1.0/5)*M_16**(3.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(6.0/5) opacity = 190*alfa**(-4.0/5)*M_16**(1.0/5)*f**(4.0/5) viscosity = 1.8*10**14*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(3.0/4)*f**(6.0/5) radial_velocity = 2.7*10**4*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(-1.0/4)*f**(-14.0/5) R_10_l.append(R_10) surface_density_l.append(surface_density) height_l.append(height) density_l.append(density) temperature_l.append(temperature) opacity_l.append(opacity) viscosity_l.append(viscosity) radial_velocity_l.append(radial_velocity) Fx.append(f) #transformation of R_10 to kilometers R_km = [ x / 10**(-4) for x in R_10_l] return R_km, surface_density_l, height_l, density_l,temperature_l,opacity_l,viscosity_l,radial_velocity_l,Fx #for definitions of parameters look up r_in =1.0001*10**(-4) r_out =10**(-2) step = 10**(-6) alfa = 0.5 M_16 = 63 m_1 = 1.5 R_hv = 1.0*10**(-4) lists=solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv) print 30*"-" print "Used parameter values" print 30*"-" print "inner radius:", 10*".",r_in, 10*".", "[10$^{10}$ cm]" print "outer radius:", 10*".", r_out, 10*".", "[10$^{10}$ cm]" print "step of plot:", 10*".", step, 10*".", "[10$^{10}$ cm]" print "parameter of accretion alfa:", 10*".", alfa print "accretion flow:", 10*".", M_16, 10*".", "[10$^{16}$ g.s$^{-1}$]" print "mass of compact object:", 10*".", m_1, 10*".", "[solar mass]" print "radius of compact object:", 10*".", R_hv, 10*".", "[10$^{10}$ cm]" plt.plot(lists[0], lists[1]) plt.title('surface density') plt.xlabel('radius [km]') plt.ylabel('surface density [g.cm$^{-2}$] ') plt.grid() plt.savefig("surface density") plt.gcf().clear() plt.plot(lists[0], lists[2]) plt.title('height') plt.xlabel('radius [km]') plt.ylabel('height [cm] ') plt.grid() plt.savefig("height") plt.gcf().clear() plt.plot(lists[0], lists[3]) plt.title('density') plt.xlabel('radius [km]') plt.ylabel('density [g.cm$^{-3}$] ') plt.grid() plt.savefig("density") plt.gcf().clear() plt.plot(lists[0], lists[4]) plt.title('temperature') plt.xlabel('radius [km]') plt.ylabel('temperature 
[K] ') plt.grid() plt.savefig("temperature") plt.gcf().clear() plt.plot(lists[0], lists[5]) plt.title('opacity') plt.xlabel('radius [km]') plt.ylabel('opacity ') plt.grid() plt.savefig("opacity") plt.gcf().clear() plt.plot(lists[0], lists[6]) plt.title('viscosity') plt.xlabel('radius [km]') plt.ylabel('viscosity [cm$^{2}$.s$^{-1}$] ') plt.grid() plt.savefig("viscosity") plt.gcf().clear() plt.plot(lists[0], lists[7]) plt.title('radial velocity') plt.xlabel('radius [km]') plt.ylabel('radial velocity [cm.s$^{-1}$] ') plt.grid() plt.savefig("radial velocity") plt.gcf().clear()
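As a quick check of how `solutions` above is meant to be consumed (a sketch only, reusing the parameter values already defined in this script), one can index into the returned profiles directly; index 4 is the central disc temperature, following the function's return order.

lists_check = solutions(r_in, r_out, step, alfa, M_16, m_1, R_hv)
print("central disc temperature at the innermost plotted radius: %.3e K" % lists_check[4][0])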
mit
lalitkumarj/NEXT-psych
next/apps/TupleBanditsPureExploration/Dashboard.py
1
3313
""" TupleBanditsPureExplorationDashboard author: Nick Glattard, n.glattard@gmail.com last updated: 4/24/2015 ###################################### TupleBanditsPureExplorationDashboard """ import json import numpy import numpy.random import matplotlib.pyplot as plt from datetime import datetime from datetime import timedelta from next.utils import utils from next.apps.AppDashboard import AppDashboard class TupleBanditsPureExplorationDashboard(AppDashboard): def __init__(self,db,ell): AppDashboard.__init__(self,db,ell) def get_app_supported_stats(self): """ Returns a list of dictionaries describing the identifier (stat_id) and necessary params inputs to be used when calling getStats Expected output (list of dicts, each with fields): (string) stat_id : the identiifer of the statistic (string) description : docstring of describing outputs (list of string) necessary_params : list where each string describes the type of param input like 'alg_label' or 'task' """ stat_list = self.get_supported_stats() stat = {} stat['stat_id'] = 'most_current_ranking' stat['description'] = self.most_current_ranking.__doc__ stat['necessary_params'] = ['alg_label'] stat_list.append(stat) return stat_list def most_current_ranking(self,app_id,exp_uid,alg_label): """ Description: Returns a ranking of arms in the form of a list of dictionaries, which is conveneint for downstream applications Expected input: (string) alg_label : must be a valid alg_label contained in alg_list list of dicts The 'headers' contains a list of dictionaries corresponding to each column of the table with fields 'label' and 'field' where 'label' is the label of the column to be put on top of the table, and 'field' is the name of the field in 'data' that the column correpsonds to Expected output (in dict): plot_type : 'columnar_table' headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ] (list of dicts with fields) data (each dict is a row, each field is the column for that row): (int) index : index of target (int) ranking : rank (0 to number of targets - 1) representing belief of being best arm """ alg_list,didSucceed,message = self.db.get(app_id+':experiments',exp_uid,'alg_list') for algorithm in alg_list: if algorithm['alg_label'] == alg_label: alg_id = algorithm['alg_id'] alg_uid = algorithm['alg_uid'] list_of_log_dict,didSucceed,message = self.ell.get_logs_with_filter(app_id+':ALG-EVALUATION',{'alg_uid':alg_uid}) list_of_log_dict = sorted(list_of_log_dict, key=lambda k: k['num_reported_answers'] ) print didSucceed, message item = list_of_log_dict[-1] return_dict = {} return_dict['headers'] = [{'label':'Rank','field':'rank'},{'label':'Target','field':'index'},{'label':'Score','field':'score'},{'label':'Precision','field':'precision'}] return_dict['data'] = item['targets'] return_dict['plot_type'] = 'columnar_table' return return_dict
apache-2.0
redarmy30/Eurobot-2017
old year/RESET-master/Machine_vision/get_position.py
2
3426
#!/usr/bin/env python2 import numpy as np import cv2 from matplotlib import pyplot as plt from math import sin, cos, tan, sqrt, pi, atan from operator import itemgetter import timeit #start = timeit.timeit() h = 0.37 #the vertical distance from the ground to camera [in meters] alpha = pi*(28.3)/180.0 #the inclination angle (28.3 degrees), converted to radians F = 0.25 #the focal distance [in meters]0.00367 Nx = 640.0 #number of pixels along x axis on the focal plane Ny = 480.0 #number of pixels along the y axis on the focal plane psi = 78.0*pi/180.0 # maximum angular resolution in diagonal Tetha = 2.0*atan((tan(psi/2.0))*3.0/5.0) # maximum resolution angle for vertical view Fi = 2.0*atan((tan(psi/2.0))*4.0/5.0) # maximum resolution angle for horizontal view #Initial calculations gamma = pi/2.0 - alpha #calculate the inclination of focal plane YM = F/cos(alpha) - h*tan(alpha) YA = F*cos(alpha) ZA = h - F*sin(alpha) ksim = 2.0*F*tan(Tetha/2.0) etham = 2.0*F*tan(Tetha/2.0) # camera initialisation #DEFINE CLASSIFICATION OF OBJECTS params = cv2.SimpleBlobDetector_Params() # Change thresholds params.minThreshold = 1 params.maxThreshold = 2000 # Filter by Area. params.filterByArea = 1 params.minArea = 1000 params.maxArea = 100000 """# Filter by Circularity params.filterByCircularity = True params.minCircularity = 0.1""" """# Filter by Convexity params.filterByConvexity = True params.minConvexity = 0.1 params.maxConvexity = 1""" """# Filter by Inertia params.filterByInertia = True params.minInertiaRatio = 0""" """#Filter by color params.filterByColor = 1 params.blobColor = 0;0;0""" #detector = cv2.SimpleBlobDetector_create(params) detector = cv2.SimpleBlobDetector(params) #- use this older constructor if SimpleBlobDetector_create above raises an error (OpenCV 2.x API) class GetObjectPosition(object): def get_position(self): cap = cv2.VideoCapture(0) #cap.set(7, 15) _, frame = cap.read() im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) keypoints = detector.detect(im) im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) a = len (keypoints) points = [] screenpoints = [] for keypoint in keypoints: x0 = keypoint.pt[0] y0 = keypoint.pt[1] cx = x0 cy = y0 ksiE = cx*ksim/Nx ethaE = cy*etham/Ny Nx1 = ksim Ny1 = etham ksiA = ksiE - Nx1/2.0 ethaA = -(ethaE - Ny1/2.0) YA1 = YA + ethaA*cos(gamma) ZA1 = (YA1 - YM)*tan(gamma) XA1 = ksiA t = h/(h-ZA1) X = XA1*t Y = YA1*t R0 = sqrt(X**2.0+Y**2.0) X = int(X*1000.0) Y = int(Y*1000.0) R0 = int(R0*1000.0) points.append((X, Y, R0)) screenpoints.append((x0,y0)) points1 = str(points) cv2.imwrite('result.png',im_with_keypoints) if not points: return z = sorted(points, key=itemgetter(2)) z1 = str(z) b = z[0] points = str(points) file = open("result.txt", "w") file.write("unsorted list") file.write(points)#unsorted file.write("\n") file.write("sorted list") file.write(z1)#sorted file.write("\n") file.write("The nearest object is:") file.write(str(b)) file.close() #img1 = cv2.imread('result.png') #img2 = cv2.putText(img = img1,text = points,org = (0,Ny),fontFace = cv2.FONT_HERSHEY_DUPLEX,fontScale = 0.5, #color = (1,1,255)) #cv2.imwrite('result1.png',img2) # img2 is commented out above, so skip this write to avoid a NameError return(b) del(cap) a = GetObjectPosition() coordinates = a.get_position() print coordinates #end = timeit.timeit() #print end - start
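Because the pixel-to-ground projection in `get_position` is interleaved with the blob-detection loop, here is a standalone sketch of the same geometry, using the same symbols and default constants as above but returning metres rather than the millimetre integers used in the class; it is intended for checking the math without a camera attached.

from math import atan, cos, pi, sqrt, tan

def pixel_to_ground(cx, cy, h=0.37, alpha=pi*28.3/180.0, F=0.25,
                    Nx=640.0, Ny=480.0, psi=78.0*pi/180.0):
    """Project pixel (cx, cy) onto the ground plane; returns (X, Y, R0) in metres."""
    Tetha = 2.0*atan(tan(psi/2.0)*3.0/5.0)
    gamma = pi/2.0 - alpha
    YM = F/cos(alpha) - h*tan(alpha)
    YA = F*cos(alpha)
    ksim = etham = 2.0*F*tan(Tetha/2.0)
    ksiA = cx*ksim/Nx - ksim/2.0
    ethaA = -(cy*etham/Ny - etham/2.0)
    YA1 = YA + ethaA*cos(gamma)
    ZA1 = (YA1 - YM)*tan(gamma)
    t = h/(h - ZA1)
    X, Y = ksiA*t, YA1*t
    return X, Y, sqrt(X**2 + Y**2)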
mit
faroit/loudness
python/tests/test_OME.py
1
2084
import numpy as np import matplotlib.pyplot as plt import loudness as ln def plotResponse(freqPoints, dataPoints, freqsInterp, responseInterp, ylim=(-40, 10), title = ""): if np.any(dataPoints): plt.semilogx(freqPoints, dataPoints, 'o') plt.semilogx(freqsInterp, responseInterp) plt.xlim(20, 20e3) plt.ylim(ylim) plt.xlabel("Frequency, Hz") plt.ylabel("Response, dB") plt.title(title) plt.show() def plotMiddleEar(filterType, ylim=(-40, 0)): freqs = np.arange(20, 20000, 2) ome = ln.OME(filterType, ln.OME.NONE) ome.interpolateResponse(freqs) response = ome.getResponse() freqPoints = ome.getMiddleEarFreqPoints() dataPoints = ome.getMiddleEardB() plotResponse(freqPoints, dataPoints, freqs, response, ylim) def plotOuterEar(filterType, ylim=(-40, 0)): freqs = np.arange(20, 20000, 2) ome = ln.OME(ln.OME.NONE, filterType) ome.interpolateResponse(freqs) response = ome.getResponse() freqPoints = ome.getOuterEarFreqPoints() dataPoints = ome.getOuterEardB() plotResponse(freqPoints, dataPoints, freqs, response, ylim) def plotCombined(middleFilterType, outerFilterType, ylim=(-40, 10)): freqs = np.arange(20, 20000, 2) ome = ln.OME(middleFilterType, outerFilterType) ome.interpolateResponse(freqs) response = ome.getResponse() plotResponse(None, None, freqs, response, ylim) plt.figure(1) plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR, (-40, 0)) plt.figure(2) plotMiddleEar(ln.OME.CHGM2011_MIDDLE_EAR, (-40, 10)) plt.figure(3) plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR_HPF, (-40, 0)) plt.figure(4) plotOuterEar(ln.OME.ANSIS342007_FREEFIELD, (-5, 20)) plt.figure(5) plotOuterEar(ln.OME.ANSIS342007_DIFFUSEFIELD, (-5, 20)) plt.figure(6) plotOuterEar(ln.OME.BD_DT990, (-10, 10)) plt.figure(7) plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.ANSIS342007_FREEFIELD, (-40, 10)) plt.figure(8) plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.BD_DT990, (-40, 10))
gpl-3.0
wangmiao1981/spark
python/pyspark/pandas/tests/test_stats.py
6
18881
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from distutils.version import LooseVersion import numpy as np import pandas as pd try: from pandas._testing import makeMissingDataframe except ImportError: from pandas.util.testing import makeMissingDataframe from pyspark import pandas as ps from pyspark.pandas.config import option_context from pyspark.testing.pandasutils import PandasOnSparkTestCase, SPARK_CONF_ARROW_ENABLED from pyspark.testing.sqlutils import SQLTestUtils class StatsTest(PandasOnSparkTestCase, SQLTestUtils): def _test_stat_functions(self, pdf_or_pser, psdf_or_psser): functions = ["max", "min", "mean", "sum", "count"] for funcname in functions: self.assert_eq(getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)()) functions = ["std", "var", "product", "sem"] for funcname in functions: self.assert_eq( getattr(psdf_or_psser, funcname)(), getattr(pdf_or_pser, funcname)(), check_exact=False, ) functions = ["std", "var", "sem"] for funcname in functions: self.assert_eq( getattr(psdf_or_psser, funcname)(ddof=0), getattr(pdf_or_pser, funcname)(ddof=0), check_exact=False, ) # NOTE: To test skew, kurt, and median, just make sure they run. # The numbers are different in spark and pandas. 
functions = ["skew", "kurt", "median"] for funcname in functions: getattr(psdf_or_psser, funcname)() def test_stat_functions(self): pdf = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4], "C": [1, np.nan, 3, np.nan]}) psdf = ps.from_pandas(pdf) self._test_stat_functions(pdf.A, psdf.A) self._test_stat_functions(pdf, psdf) # empty self._test_stat_functions(pdf.A.loc[[]], psdf.A.loc[[]]) self._test_stat_functions(pdf.loc[[]], psdf.loc[[]]) def test_stat_functions_multiindex_column(self): arrays = [np.array(["A", "A", "B", "B"]), np.array(["one", "two", "one", "two"])] pdf = pd.DataFrame(np.random.randn(3, 4), index=["A", "B", "C"], columns=arrays) psdf = ps.from_pandas(pdf) self._test_stat_functions(pdf.A, psdf.A) self._test_stat_functions(pdf, psdf) def test_stat_functions_with_no_numeric_columns(self): pdf = pd.DataFrame( { "A": ["a", None, "c", "d", None, "f", "g"], "B": ["A", "B", "C", None, "E", "F", None], } ) psdf = ps.from_pandas(pdf) self._test_stat_functions(pdf, psdf) def test_sum(self): pdf = pd.DataFrame({"a": [1, 2, 3, np.nan], "b": [0.1, np.nan, 0.3, np.nan]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.sum(), pdf.sum()) self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1)) self.assert_eq(psdf.sum(min_count=3), pdf.sum(min_count=3)) self.assert_eq(psdf.sum(axis=1, min_count=1), pdf.sum(axis=1, min_count=1)) self.assert_eq(psdf.loc[[]].sum(), pdf.loc[[]].sum()) self.assert_eq(psdf.loc[[]].sum(min_count=1), pdf.loc[[]].sum(min_count=1)) self.assert_eq(psdf["a"].sum(), pdf["a"].sum()) self.assert_eq(psdf["a"].sum(min_count=3), pdf["a"].sum(min_count=3)) self.assert_eq(psdf["b"].sum(min_count=3), pdf["b"].sum(min_count=3)) self.assert_eq(psdf["a"].loc[[]].sum(), pdf["a"].loc[[]].sum()) self.assert_eq(psdf["a"].loc[[]].sum(min_count=1), pdf["a"].loc[[]].sum(min_count=1)) def test_product(self): pdf = pd.DataFrame( {"a": [1, -2, -3, np.nan], "b": [0.1, np.nan, -0.3, np.nan], "c": [10, 20, 0, -10]} ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.product(), pdf.product(), check_exact=False) self.assert_eq(psdf.product(axis=1), pdf.product(axis=1)) self.assert_eq(psdf.product(min_count=3), pdf.product(min_count=3), check_exact=False) self.assert_eq(psdf.product(axis=1, min_count=1), pdf.product(axis=1, min_count=1)) self.assert_eq(psdf.loc[[]].product(), pdf.loc[[]].product()) self.assert_eq(psdf.loc[[]].product(min_count=1), pdf.loc[[]].product(min_count=1)) self.assert_eq(psdf["a"].product(), pdf["a"].product(), check_exact=False) self.assert_eq( psdf["a"].product(min_count=3), pdf["a"].product(min_count=3), check_exact=False ) self.assert_eq(psdf["b"].product(min_count=3), pdf["b"].product(min_count=3)) self.assert_eq(psdf["c"].product(min_count=3), pdf["c"].product(min_count=3)) self.assert_eq(psdf["a"].loc[[]].product(), pdf["a"].loc[[]].product()) self.assert_eq( psdf["a"].loc[[]].product(min_count=1), pdf["a"].loc[[]].product(min_count=1) ) def test_abs(self): pdf = pd.DataFrame( { "A": [1, -2, np.nan, -4, 5], "B": [1.0, -2, np.nan, -4, 5], "C": [-6.0, -7, -8, np.nan, 10], "D": ["a", "b", "c", "d", np.nan], "E": [True, np.nan, False, True, True], } ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.A.abs(), pdf.A.abs()) self.assert_eq(psdf.B.abs(), pdf.B.abs()) self.assert_eq(psdf.E.abs(), pdf.E.abs()) # pandas' bug? 
# self.assert_eq(psdf[["B", "C", "E"]].abs(), pdf[["B", "C", "E"]].abs()) self.assert_eq(psdf[["B", "C"]].abs(), pdf[["B", "C"]].abs()) self.assert_eq(psdf[["E"]].abs(), pdf[["E"]].abs()) with self.assertRaisesRegex( TypeError, "bad operand type for abs\\(\\): object \\(string\\)" ): psdf.abs() with self.assertRaisesRegex( TypeError, "bad operand type for abs\\(\\): object \\(string\\)" ): psdf.D.abs() def test_axis_on_dataframe(self): # The number of each count is intentionally big # because when data is small, it executes a shortcut. # Less than 'compute.shortcut_limit' will execute a shortcut # by using collected pandas dataframe directly. # now we set the 'compute.shortcut_limit' as 1000 explicitly with option_context("compute.shortcut_limit", 1000): pdf = pd.DataFrame( { "A": [1, -2, 3, -4, 5] * 300, "B": [1.0, -2, 3, -4, 5] * 300, "C": [-6.0, -7, -8, -9, 10] * 300, "D": [True, False, True, False, False] * 300, }, index=range(10, 15001, 10), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.count(axis=1), pdf.count(axis=1)) self.assert_eq(psdf.var(axis=1), pdf.var(axis=1)) self.assert_eq(psdf.var(axis=1, ddof=0), pdf.var(axis=1, ddof=0)) self.assert_eq(psdf.std(axis=1), pdf.std(axis=1)) self.assert_eq(psdf.std(axis=1, ddof=0), pdf.std(axis=1, ddof=0)) self.assert_eq(psdf.max(axis=1), pdf.max(axis=1)) self.assert_eq(psdf.min(axis=1), pdf.min(axis=1)) self.assert_eq(psdf.sum(axis=1), pdf.sum(axis=1)) self.assert_eq(psdf.product(axis=1), pdf.product(axis=1)) self.assert_eq(psdf.kurtosis(axis=1), pdf.kurtosis(axis=1)) self.assert_eq(psdf.skew(axis=1), pdf.skew(axis=1)) self.assert_eq(psdf.mean(axis=1), pdf.mean(axis=1)) self.assert_eq(psdf.sem(axis=1), pdf.sem(axis=1)) self.assert_eq(psdf.sem(axis=1, ddof=0), pdf.sem(axis=1, ddof=0)) self.assert_eq( psdf.count(axis=1, numeric_only=True), pdf.count(axis=1, numeric_only=True) ) self.assert_eq(psdf.var(axis=1, numeric_only=True), pdf.var(axis=1, numeric_only=True)) self.assert_eq( psdf.var(axis=1, ddof=0, numeric_only=True), pdf.var(axis=1, ddof=0, numeric_only=True), ) self.assert_eq(psdf.std(axis=1, numeric_only=True), pdf.std(axis=1, numeric_only=True)) self.assert_eq( psdf.std(axis=1, ddof=0, numeric_only=True), pdf.std(axis=1, ddof=0, numeric_only=True), ) self.assert_eq( psdf.max(axis=1, numeric_only=True), pdf.max(axis=1, numeric_only=True).astype(float), ) self.assert_eq( psdf.min(axis=1, numeric_only=True), pdf.min(axis=1, numeric_only=True).astype(float), ) self.assert_eq( psdf.sum(axis=1, numeric_only=True), pdf.sum(axis=1, numeric_only=True).astype(float), ) self.assert_eq( psdf.product(axis=1, numeric_only=True), pdf.product(axis=1, numeric_only=True).astype(float), ) self.assert_eq( psdf.kurtosis(axis=1, numeric_only=True), pdf.kurtosis(axis=1, numeric_only=True) ) self.assert_eq( psdf.skew(axis=1, numeric_only=True), pdf.skew(axis=1, numeric_only=True) ) self.assert_eq( psdf.mean(axis=1, numeric_only=True), pdf.mean(axis=1, numeric_only=True) ) self.assert_eq(psdf.sem(axis=1, numeric_only=True), pdf.sem(axis=1, numeric_only=True)) self.assert_eq( psdf.sem(axis=1, ddof=0, numeric_only=True), pdf.sem(axis=1, ddof=0, numeric_only=True), ) def test_corr(self): # Disable arrow execution since corr() is using UDT internally which is not supported. 
with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): # DataFrame # we do not handle NaNs for now pdf = makeMissingDataframe(0.3, 42).fillna(0) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False) # Series pser_a = pdf.A pser_b = pdf.B psser_a = psdf.A psser_b = psdf.B self.assertAlmostEqual(psser_a.corr(psser_b), pser_a.corr(pser_b)) self.assertRaises(TypeError, lambda: psser_a.corr(psdf)) # multi-index columns columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B"), ("Y", "C"), ("Z", "D")]) pdf.columns = columns psdf.columns = columns self.assert_eq(psdf.corr(), pdf.corr(), check_exact=False) # Series pser_xa = pdf[("X", "A")] pser_xb = pdf[("X", "B")] psser_xa = psdf[("X", "A")] psser_xb = psdf[("X", "B")] self.assert_eq(psser_xa.corr(psser_xb), pser_xa.corr(pser_xb), almost=True) def test_cov_corr_meta(self): # Disable arrow execution since corr() is using UDT internally which is not supported. with self.sql_conf({SPARK_CONF_ARROW_ENABLED: False}): pdf = pd.DataFrame( { "a": np.array([1, 2, 3], dtype="i1"), "b": np.array([1, 2, 3], dtype="i2"), "c": np.array([1, 2, 3], dtype="i4"), "d": np.array([1, 2, 3]), "e": np.array([1.0, 2.0, 3.0], dtype="f4"), "f": np.array([1.0, 2.0, 3.0]), "g": np.array([True, False, True]), "h": np.array(list("abc")), }, index=pd.Index([1, 2, 3], name="myindex"), ) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.corr(), pdf.corr()) def test_stats_on_boolean_dataframe(self): pdf = pd.DataFrame({"A": [True, False, True], "B": [False, False, True]}) psdf = ps.from_pandas(pdf) self.assert_eq(psdf.min(), pdf.min()) self.assert_eq(psdf.max(), pdf.max()) self.assert_eq(psdf.count(), pdf.count()) self.assert_eq(psdf.sum(), pdf.sum()) self.assert_eq(psdf.product(), pdf.product()) self.assert_eq(psdf.mean(), pdf.mean()) self.assert_eq(psdf.var(), pdf.var(), check_exact=False) self.assert_eq(psdf.var(ddof=0), pdf.var(ddof=0), check_exact=False) self.assert_eq(psdf.std(), pdf.std(), check_exact=False) self.assert_eq(psdf.std(ddof=0), pdf.std(ddof=0), check_exact=False) self.assert_eq(psdf.sem(), pdf.sem(), check_exact=False) self.assert_eq(psdf.sem(ddof=0), pdf.sem(ddof=0), check_exact=False) def test_stats_on_boolean_series(self): pser = pd.Series([True, False, True]) psser = ps.from_pandas(pser) self.assert_eq(psser.min(), pser.min()) self.assert_eq(psser.max(), pser.max()) self.assert_eq(psser.count(), pser.count()) self.assert_eq(psser.sum(), pser.sum()) self.assert_eq(psser.product(), pser.product()) self.assert_eq(psser.mean(), pser.mean()) self.assert_eq(psser.var(), pser.var(), almost=True) self.assert_eq(psser.var(ddof=0), pser.var(ddof=0), almost=True) self.assert_eq(psser.std(), pser.std(), almost=True) self.assert_eq(psser.std(ddof=0), pser.std(ddof=0), almost=True) self.assert_eq(psser.sem(), pser.sem(), almost=True) self.assert_eq(psser.sem(ddof=0), pser.sem(ddof=0), almost=True) def test_stats_on_non_numeric_columns_should_be_discarded_if_numeric_only_is_true(self): pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]}) psdf = ps.from_pandas(pdf) self.assert_eq( psdf[["i", "s"]].max(numeric_only=True), pdf[["i", "s"]].max(numeric_only=True) ) self.assert_eq( psdf[["b", "s"]].max(numeric_only=True), pdf[["b", "s"]].max(numeric_only=True) ) self.assert_eq( psdf[["i", "s"]].min(numeric_only=True), pdf[["i", "s"]].min(numeric_only=True) ) self.assert_eq( psdf[["b", "s"]].min(numeric_only=True), pdf[["b", "s"]].min(numeric_only=True) ) self.assert_eq(psdf.count(numeric_only=True), 
pdf.count(numeric_only=True)) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True)) self.assert_eq(psdf.product(numeric_only=True), pdf.product(numeric_only=True)) else: self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int)) self.assert_eq( psdf.product(numeric_only=True), pdf.product(numeric_only=True).astype(int) ) self.assert_eq(psdf.mean(numeric_only=True), pdf.mean(numeric_only=True)) self.assert_eq(psdf.var(numeric_only=True), pdf.var(numeric_only=True), check_exact=False) self.assert_eq( psdf.var(ddof=0, numeric_only=True), pdf.var(ddof=0, numeric_only=True), check_exact=False, ) self.assert_eq(psdf.std(numeric_only=True), pdf.std(numeric_only=True), check_exact=False) self.assert_eq( psdf.std(ddof=0, numeric_only=True), pdf.std(ddof=0, numeric_only=True), check_exact=False, ) self.assert_eq(psdf.sem(numeric_only=True), pdf.sem(numeric_only=True), check_exact=False) self.assert_eq( psdf.sem(ddof=0, numeric_only=True), pdf.sem(ddof=0, numeric_only=True), check_exact=False, ) self.assert_eq(len(psdf.median(numeric_only=True)), len(pdf.median(numeric_only=True))) self.assert_eq(len(psdf.kurtosis(numeric_only=True)), len(pdf.kurtosis(numeric_only=True))) self.assert_eq(len(psdf.skew(numeric_only=True)), len(pdf.skew(numeric_only=True))) # Boolean was excluded because of a behavior change in NumPy # https://github.com/numpy/numpy/pull/16273#discussion_r641264085 which pandas inherits # but this behavior is inconsistent in pandas context. # Boolean column in quantile tests are excluded for now. # TODO(SPARK-35555): track and match the behavior of quantile to pandas' pdf = pd.DataFrame({"i": [0, 1, 2], "s": ["x", "y", "z"]}) psdf = ps.from_pandas(pdf) self.assert_eq( len(psdf.quantile(q=0.5, numeric_only=True)), len(pdf.quantile(q=0.5, numeric_only=True)), ) self.assert_eq( len(psdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)), len(pdf.quantile(q=[0.25, 0.5, 0.75], numeric_only=True)), ) def test_numeric_only_unsupported(self): pdf = pd.DataFrame({"i": [0, 1, 2], "b": [False, False, True], "s": ["x", "y", "z"]}) psdf = ps.from_pandas(pdf) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True)) self.assert_eq( psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False) ) else: self.assert_eq(psdf.sum(numeric_only=True), pdf.sum(numeric_only=True).astype(int)) self.assert_eq( psdf[["i", "b"]].sum(numeric_only=False), pdf[["i", "b"]].sum(numeric_only=False).astype(int), ) with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): psdf.sum(numeric_only=False) with self.assertRaisesRegex(TypeError, "Could not convert object \\(string\\) to numeric"): psdf.s.sum() if __name__ == "__main__": import unittest from pyspark.pandas.tests.test_stats import * # noqa: F401 try: import xmlrunner # type: ignore[import] testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2) except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
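The comment in `test_axis_on_dataframe` above leans on the `compute.shortcut_limit` option to force the distributed code path; a minimal sketch of that pattern outside the test harness looks like this (it assumes a working Spark installation, since `pyspark.pandas` starts a SparkSession on demand).

import pyspark.pandas as ps
from pyspark.pandas.config import option_context

# Keep the data larger than the shortcut limit so the distributed path is exercised.
with option_context("compute.shortcut_limit", 1000):
    psdf = ps.DataFrame({"a": list(range(2000))})
    print(psdf.sum())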
apache-2.0
joergkappes/opengm
src/interfaces/python/examples/python_visitor_gui.py
14
1377
""" Usage: python_visitor_gui.py This script shows how one can implement visitors in pure python and inject them into OpenGM solver. ( not all OpenGM solvers support this kind of code injection ) """ import opengm import numpy import matplotlib from matplotlib import pyplot as plt shape=[100,100] numLabels=10 unaries=numpy.random.rand(shape[0], shape[1],numLabels) potts=opengm.PottsFunction([numLabels,numLabels],0.0,0.4) gm=opengm.grid2d2Order(unaries=unaries,regularizer=potts) inf=opengm.inference.BeliefPropagation(gm,parameter=opengm.InfParam(damping=0.5)) class PyCallback(object): def __init__(self,shape,numLabels): self.shape=shape self.numLabels=numLabels self.cmap = matplotlib.colors.ListedColormap ( numpy.random.rand ( self.numLabels,3)) matplotlib.interactive(True) def begin(self,inference): print "begin of inference" def end(self,inference): print "end of inference" def visit(self,inference): gm=inference.gm() labelVector=inference.arg() print "energy ",gm.evaluate(labelVector) labelVector=labelVector.reshape(self.shape) plt.imshow(labelVector*255.0, cmap=self.cmap,interpolation="nearest") plt.draw() callback=PyCallback(shape,numLabels) visitor=inf.pythonVisitor(callback,visitNth=1) inf.infer(visitor) argmin=inf.arg()
mit
petebachant/PXL
pxl/tests/test_fdiff.py
1
1436
from __future__ import division, print_function from .. import fdiff from ..fdiff import * import matplotlib.pyplot as plt import pandas as pd import os import numpy as np from uncertainties import unumpy plot = False def test_second_order_diff(): """Test `second_order_diff`.""" # Create a non-equally spaced x vector x = np.append(np.linspace(0, np.pi, 100), np.linspace(np.pi + 0.01, 2*np.pi, 400)) u = np.sin(x) dudx = second_order_diff(u, x) assert dudx.shape == u.shape # Assert that this function is almost identical to cos(x) np.testing.assert_allclose(dudx, np.cos(x), rtol=1e-3) if plot: plt.plot(x, dudx, "-o", lw=2, alpha=0.5) plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5) plt.show() def test_second_order_diff_uncertainties(): """Test that `second_order_diff` works with uncertainties.""" # Create a non-equally spaced x vector x = np.append(np.linspace(0, np.pi, 50), np.linspace(np.pi + 0.01, 2*np.pi, 100)) x_unc = unumpy.uarray(x, np.ones(len(x))*1e-3) u = unumpy.uarray(np.sin(x), np.ones(len(x))*1e-2) dudx = second_order_diff(u, x) print(dudx[:5]) print(dudx[-5:]) if plot: plt.errorbar(x, unumpy.nominal_values(dudx), yerr=unumpy.std_devs(dudx), fmt="-o", lw=2, alpha=0.5) plt.plot(x, np.cos(x), "--^", lw=2, alpha=0.5) plt.show()
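For readers wondering what `second_order_diff` is expected to compute on the non-uniform grids built above, here is a generic textbook stencil for a second-order first derivative with unequal spacing; it is a comparison sketch only and is not claimed to match the internals of `pxl.fdiff`.

import numpy as np

def second_order_diff_sketch(u, x):
    """First derivative du/dx on a possibly non-uniform grid."""
    u, x = np.asarray(u, dtype=float), np.asarray(x, dtype=float)
    dudx = np.empty_like(u)
    h1 = x[1:-1] - x[:-2]
    h2 = x[2:] - x[1:-1]
    # Interior points: second-order weighted difference for unequal spacing.
    dudx[1:-1] = (u[2:]*h1**2 - u[:-2]*h2**2 + u[1:-1]*(h2**2 - h1**2)) / (h1*h2*(h1 + h2))
    # Boundaries: simple one-sided differences (first-order, kept short on purpose).
    dudx[0] = (u[1] - u[0]) / (x[1] - x[0])
    dudx[-1] = (u[-1] - u[-2]) / (x[-1] - x[-2])
    return dudx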
gpl-3.0
nealbob/nealbob.github.io
_site/code/multicore_storage_sim.py
2
2177
import numpy as np from matplotlib import pyplot as plt import time import errno from multiprocessing import Process from multiprocessing.queues import Queue def retry_on_eintr(function, *args, **kw): while True: try: return function(*args, **kw) except IOError, e: if e.errno == errno.EINTR: continue else: raise class RetryQueue(Queue): """Queue which will retry if interrupted with EINTR.""" def get(self, block=True, timeout=None): return retry_on_eintr(Queue.get, self, block, timeout) def simulate(K, mu, sig, Sbar, T, multi=False, que=0, jobno=0): np.random.seed(jobno) S = np.zeros(T+1) W = np.zeros(T+1) I = np.zeros(T+1) S[0] = K for t in range(T): W[t] = min(S[t], Sbar) I[t+1] = max(np.random.normal(mu, sig), 0) S[t+1] = min(S[t] - W[t] + I[t+1], K) if multi: que.put(S) else: return S def multi_sim(CORES=2, T=100): results = [] ques = [Queue() for i in range(CORES)] args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)] jobs = [Process(target=simulate, args=(a)) for a in args] for j in jobs: j.start() for q in ques: results.append(q.get()) for j in jobs: j.join() S = np.hstack(results) return S """ ### Sample size T = 1000000 # Single core run ================================== tic = time.time() S = simulate(100, 70, 70, 70, T) toc = time.time() print 'Single core run time: ' + str(round(toc - tic,3)) plt.plot(S[0:100]) plt.show() # Multi core run ================================== tic = time.time() CORES = 2 results = [] ques = [Queue() for i in range(CORES)] args = [(100, 70, 70, 70, int(T/CORES), True, ques[i], i) for i in range(CORES)] jobs = [Process(target=simulate, args=(a)) for a in args] for j in jobs: j.start() for q in ques: results.append(q.get()) for j in jobs: j.join() S = np.hstack(results) toc = time.time() print 'Multi-core run time: ' + str(toc - tic) plt.plot(S[0:100]) plt.show() print S.shape plt.scatter(results[0], results[1]) plt.show() """
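The commented-out benchmark at the bottom shows the intended usage; an equivalent, hypothetical driver for `multi_sim` is sketched below, with a `__main__` guard added because multiprocessing requires it on platforms that spawn rather than fork worker processes.

if __name__ == '__main__':
    S = multi_sim(CORES=2, T=10000)
    print('simulated %d periods across 2 cores' % len(S))
    plt.plot(S[0:100])
    plt.show()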
mit
asimshankar/tensorflow
tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py
137
2219
# encoding: utf-8 # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Categorical tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.contrib.learn.python.learn.learn_io import HAS_PANDAS from tensorflow.contrib.learn.python.learn.preprocessing import categorical from tensorflow.python.platform import test class CategoricalTest(test.TestCase): """Categorical tests.""" def testSingleCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor(min_frequency=1) x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"], [1], ["0"], [np.nan], [3]]) self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]]) def testSingleCategoricalProcessorPandasSingleDF(self): if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top cat_processor = categorical.CategoricalProcessor() data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]}) x = list(cat_processor.fit_transform(data)) self.assertAllEqual(list(x), [[1], [2], [1]]) def testMultiCategoricalProcessor(self): cat_processor = categorical.CategoricalProcessor( min_frequency=0, share=False) x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]]) self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]]) if __name__ == "__main__": test.main()
apache-2.0
mhoffman/kmos
kmos/cli.py
1
16514
#!/usr/bin/env python """Entry point module for the command-line interface. The kmos executable should be on the program path, import this modules main function and run it. To call kmos command as you would from the shell, use :: kmos.cli.main('...') Every command can be shortened as long as it is non-ambiguous, e.g. :: kmos ex <xml-file> instead of :: kmos export <xml-file> etc. """ # Copyright 2009-2013 Max J. Hoffmann (mjhoffmann@gmail.com) # This file is part of kmos. # # kmos is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # kmos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with kmos. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import shutil usage = {} usage['all'] = """kmos help all Display documentation for all commands. """ usage['benchmark'] = """kmos benchmark Run 1 mio. kMC steps on model in current directory and report runtime. """ usage['build'] = """kmos build Build kmc_model.%s from *f90 files in the current directory. Additional Parameters :: -d/--debug Turn on assertion statements in F90 code -n/--no-compiler-optimization Do not send optimizing flags to compiler. """ % ('pyd' if os.name == 'nt' else 'so') usage['help'] = """kmos help <command> Print usage information for the given command. """ usage['export'] = """kmos export <xml-file> [<export-path>] Take a kmos xml-file and export all generated source code to the export-path. There try to build the kmc_model.%s. Additional Parameters :: -s/--source-only Export source only and don't build binary -b/--backend (local_smart|lat_int) Choose backend. Default is "local_smart". lat_int is EXPERIMENTAL and not made for production, yet. -d/--debug Turn on assertion statements in F90 code. (Only active in compile step) --acf Build the modules base_acf.f90 and proclist_acf.f90. Default is false. This both modules contain functions to calculate ACF (autocorrelation function) and MSD (mean squared displacement). -n/--no-compiler-optimization Do not send optimizing flags to compiler. """ % ('pyd' if os.name == 'nt' else 'so') usage['settings-export'] = """kmos settings-export <xml-file> [<export-path>] Take a kmos xml-file and export kmc_settings.py to the export-path. """ usage['edit'] = """kmos edit <xml-file> Open the kmos xml-file in a GUI to edit the model. """ usage['import'] = """kmos import <xml-file> Take a kmos xml-file and open an ipython shell with the project_tree imported as pt. """ usage['rebuild'] = """kmos rebuild Export code and rebuild binary module from XML information included in kmc_settings.py in current directory. Additional Parameters :: -d/--debug Turn on assertion statements in F90 code """ usage['shell'] = """kmos shell Open an interactive shell and create a KMC_Model in it run == shell """ usage['run'] = """kmos run Open an interactive shell and create a KMC_Model in it run == shell """ usage['version'] = """kmos version Print version number and exit. """ usage['view'] = """kmos view Take a kmc_model.%s and kmc_settings.py in the same directory and start to simulate the model visually. 
Additional Parameters :: -v/--steps-per-frame <number> Number of steps per frame """ % ('pyd' if os.name == 'nt' else 'so') usage['xml'] = """kmos xml Print xml representation of model to stdout """ def get_options(args=None, get_parser=False): import optparse import os from glob import glob import kmos parser = optparse.OptionParser( 'Usage: %prog [help] (' + '|'.join(sorted(usage.keys())) + ') [options]', version=kmos.__version__) parser.add_option('-s', '--source-only', dest='source_only', action='store_true', default=False) parser.add_option('-p', '--path-to-f2py', dest='path_to_f2py', default='f2py') parser.add_option('-b', '--backend', dest='backend', default='local_smart') parser.add_option('-a', '--avoid-default-state', dest='avoid_default_state', action='store_true', default=False, ) parser.add_option('-v', '--steps-per-frame', dest='steps_per_frame', type='int', default='50000') parser.add_option('-d', '--debug', default=False, dest='debug', action='store_true') parser.add_option('-n', '--no-compiler-optimization', default=False, dest='no_optimize', action='store_true') parser.add_option('-o', '--overwrite', default=False, action='store_true') parser.add_option('-l', '--variable-length', dest='variable_length', default=95, type='int') parser.add_option('-c', '--catmap', default=False, action='store_true') parser.add_option('--acf', dest='acf', action='store_true', default=False, ) try: from numpy.distutils.fcompiler import get_default_fcompiler from numpy.distutils import log log.set_verbosity(-1, True) fcompiler = get_default_fcompiler() except: fcompiler = 'gfortran' parser.add_option('-f', '--fcompiler', dest='fcompiler', default=os.environ.get('F2PY_FCOMPILER', fcompiler)) if args is not None: options, args = parser.parse_args(args.split()) else: options, args = parser.parse_args() if len(args) < 1: parser.error('Command expected') if get_parser: return options, args, parser else: return options, args def match_keys(arg, usage, parser): """Try to match part of a command against the set of commands from usage. Throws an error if not successful. """ possible_args = [key for key in usage if key.startswith(arg)] if len(possible_args) == 0: parser.error('Command "%s" not understood.' % arg) elif len(possible_args) > 1: parser.error(('Command "%s" ambiguous.\n' 'Could be one of %s\n\n') % (arg, possible_args)) else: return possible_args[0] def main(args=None): """The CLI main entry point function. The optional argument args, can be used to directly supply command line argument like $ kmos <args> otherwise args will be taken from STDIN. """ from glob import glob options, args, parser = get_options(args, get_parser=True) global model, pt, np, cm_model if not args[0] in usage.keys(): args[0] = match_keys(args[0], usage, parser) if args[0] == 'benchmark': from sys import path path.append(os.path.abspath(os.curdir)) nsteps = 1000000 from time import time from kmos.run import KMC_Model model = KMC_Model(print_rates=False, banner=False) time0 = time() try: model.proclist.do_kmc_steps(nsteps) except: # kmos < 0.3 had no model.proclist.do_kmc_steps model.do_steps(nsteps) needed_time = time() - time0 print('Using the [%s] backend.' 
% model.get_backend()) print('%s steps took %.2f seconds' % (nsteps, needed_time)) print('Or %.2e steps/s' % (1e6 / needed_time)) model.deallocate() elif args[0] == 'build': from kmos.utils import build build(options) elif args[0] == 'edit': from kmos import gui gui.main() elif args[0] == 'settings-export': import kmos.types import kmos.io from kmos.io import ProcListWriter if len(args) < 2: parser.error('XML file and export path expected.') if len(args) < 3: out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend) print('No export path provided. Exporting to %s' % out_dir) args.append(out_dir) xml_file = args[1] export_dir = args[2] project = kmos.types.Project() project.import_file(xml_file) writer = ProcListWriter(project, export_dir) writer.write_settings() elif args[0] == 'export': import kmos.types import kmos.io from kmos.utils import build if len(args) < 2: parser.error('XML file and export path expected.') if len(args) < 3: out_dir = '%s_%s' % (os.path.splitext(args[1])[0], options.backend) print('No export path provided. Exporting to %s' % out_dir) args.append(out_dir) xml_file = args[1] export_dir = os.path.join(args[2], 'src') project = kmos.types.Project() project.import_file(xml_file) project.shorten_names(max_length=options.variable_length) kmos.io.export_source(project, export_dir, options=options) if ((os.name == 'posix' and os.uname()[0] in ['Linux', 'Darwin']) or os.name == 'nt') \ and not options.source_only: os.chdir(export_dir) build(options) for out in glob('kmc_*'): if os.path.exists('../%s' % out) : if options.overwrite : overwrite = 'y' else: overwrite = raw_input(('Should I overwrite existing %s ?' '[y/N] ') % out).lower() if overwrite.startswith('y') : print('Overwriting {out}'.format(**locals())) os.remove('../%s' % out) shutil.move(out, '..') else : print('Skipping {out}'.format(**locals())) else: shutil.move(out, '..') elif args[0] == 'settings-export': import kmos.io pt = kmos.io.import_file(args[1]) if len(args) < 3: out_dir = os.path.splitext(args[1])[0] print('No export path provided. Exporting kmc_settings.py to %s' % out_dir) args.append(out_dir) if not os.path.exists(args[2]): os.mkdir(args[2]) elif not os.path.isdir(args[2]): raise UserWarning("Cannot overwrite %s; Exiting;" % args[2]) writer = kmos.io.ProcListWriter(pt, args[2]) writer.write_settings() elif args[0] == 'help': if len(args) < 2: parser.error('Which help do you want?') if args[1] == 'all': for command in sorted(usage): print(usage[command]) elif args[1] in usage: print('Usage: %s\n' % usage[args[1]]) else: arg = match_keys(args[1], usage, parser) print('Usage: %s\n' % usage[arg]) elif args[0] == 'import': import kmos.io if not len(args) >= 2: raise UserWarning('XML file name expected.') pt = kmos.io.import_xml_file(args[1]) if len(args) == 2: sh(banner='Note: pt = kmos.io.import_xml(\'%s\')' % args[1]) elif len(args) == 3: # if optional 3rd argument is given, store model there and exit pt.save(args[2]) elif args[0] == 'rebuild': from time import sleep print('Will rebuild model from kmc_settings.py in current directory') print('Please do not interrupt,' ' build process, as you will most likely') print('loose the current model files.') sleep(2.) 
from sys import path path.append(os.path.abspath(os.curdir)) from tempfile import mktemp if not os.path.exists('kmc_model.so') \ and not os.path.exists('kmc_model.pyd'): raise Exception('No kmc_model.so found.') if not os.path.exists('kmc_settings.py'): raise Exception('No kmc_settings.py found.') from kmos.run import KMC_Model model = KMC_Model(print_rates=False, banner=False) tempfile = mktemp() f = file(tempfile, 'w') f.write(model.xml()) f.close() for kmc_model in glob('kmc_model.*'): os.remove(kmc_model) os.remove('kmc_settings.py') main('export %s -b %s .' % (tempfile, options.backend)) os.remove(tempfile) model.deallocate() elif args[0] in ['run', 'shell']: from sys import path path.append(os.path.abspath(os.curdir)) from kmos.run import KMC_Model # useful to have in interactive mode import numpy as np try: from matplotlib import pyplot as plt except: plt = None if options.catmap: import catmap import catmap.cli.kmc_runner seed = catmap.cli.kmc_runner.get_seed_from_path('.') cm_model = catmap.ReactionModel(setup_file='{seed}.mkm'.format(**locals())) catmap_message = '\nSide-loaded catmap_model {seed}.mkm into cm_model = ReactionModel(setup_file="{seed}.mkm")'.format(**locals()) else: catmap_message = '' try: model = KMC_Model(print_rates=False) except: print("Warning: could not import kmc_model!" " Please make sure you are in the right directory") sh(banner='Note: model = KMC_Model(print_rates=False){catmap_message}'.format(**locals())) try: model.deallocate() except: print("Warning: could not deallocate model. Was is allocated?") elif args[0] == 'version': from kmos import VERSION print(VERSION) elif args[0] == 'view': from sys import path path.append(os.path.abspath(os.curdir)) from kmos import view view.main(steps_per_frame=options.steps_per_frame) elif args[0] == 'xml': from sys import path path.append(os.path.abspath(os.curdir)) from kmos.run import KMC_Model model = KMC_Model(banner=False, print_rates=False) print(model.xml()) else: parser.error('Command "%s" not understood.' % args[0]) def sh(banner): """Wrapper around interactive ipython shell that factors out ipython version depencies. """ from distutils.version import LooseVersion import IPython if hasattr(IPython, 'release'): try: from IPython.terminal.embed import InteractiveShellEmbed InteractiveShellEmbed(banner1=banner)() except ImportError: try: from IPython.frontend.terminal.embed \ import InteractiveShellEmbed InteractiveShellEmbed(banner1=banner)() except ImportError: from IPython.Shell import IPShellEmbed IPShellEmbed(banner=banner)() else: from IPython.Shell import IPShellEmbed IPShellEmbed(banner=banner)()
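As the module docstring notes, `main` can also be driven programmatically instead of via the `kmos` executable; a hypothetical call that exports a project file named model.xml (the file name is illustrative, not part of the package) would look like this.

from kmos import cli

# Equivalent to running "kmos export model.xml exported_model -b local_smart" in a shell.
cli.main("export model.xml exported_model -b local_smart")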
gpl-3.0
sjperkins/tensorflow
tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
88
31139
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementations of different data feeders to provide data for TF trainer.""" # TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues. from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import math import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.platform import tf_logging as logging # pylint: disable=g-multiple-import,g-bad-import-order from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels # pylint: enable=g-multiple-import,g-bad-import-order def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None): """Returns shape for input and output of the data feeder.""" x_is_dict, y_is_dict = isinstance( x_shape, dict), y_shape is not None and isinstance(y_shape, dict) if y_is_dict and n_classes is not None: assert (isinstance(n_classes, dict)) if batch_size is None: batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0] elif batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) if x_is_dict: input_shape = {} for k, v in list(x_shape.items()): input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1]) else: x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1] input_shape = [batch_size] + x_shape if y_shape is None: return input_shape, None, batch_size def out_el_shape(out_shape, num_classes): out_shape = list(out_shape[1:]) if len(out_shape) > 1 else [] # Skip first dimension if it is 1. if out_shape and out_shape[0] == 1: out_shape = out_shape[1:] if num_classes is not None and num_classes > 1: return [batch_size] + out_shape + [num_classes] else: return [batch_size] + out_shape if not y_is_dict: output_shape = out_el_shape(y_shape, n_classes) else: output_shape = dict([ (k, out_el_shape(v, n_classes[k] if n_classes is not None and k in n_classes else None)) for k, v in list(y_shape.items()) ]) return input_shape, output_shape, batch_size def _data_type_filter(x, y): """Filter data types into acceptable format.""" if HAS_DASK: x = extract_dask_data(x) if y is not None: y = extract_dask_labels(y) if HAS_PANDAS: x = extract_pandas_data(x) if y is not None: y = extract_pandas_labels(y) return x, y def _is_iterable(x): return hasattr(x, 'next') or hasattr(x, '__next__') def setup_train_data_feeder(x, y, n_classes, batch_size=None, shuffle=True, epochs=None): """Create data feeder, to sample inputs from dataset. If `x` and `y` are iterators, use `StreamingDataFeeder`. Args: x: numpy, pandas or Dask matrix or dictionary of aforementioned. 
Also supports iterables. y: numpy, pandas or Dask array or dictionary of aforementioned. Also supports iterables. n_classes: number of classes. Must be None or same type as y. In case, `y` is `dict` (or iterable which returns dict) such that `n_classes[key] = n_classes for y[key]` batch_size: size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: DataFeeder object that returns training data. Raises: ValueError: if one of `x` and `y` is iterable and the other is not. """ x, y = _data_type_filter(x, y) if HAS_DASK: # pylint: disable=g-import-not-at-top import dask.dataframe as dd if (isinstance(x, (dd.Series, dd.DataFrame)) and (y is None or isinstance(y, (dd.Series, dd.DataFrame)))): data_feeder_cls = DaskDataFeeder else: data_feeder_cls = DataFeeder else: data_feeder_cls = DataFeeder if _is_iterable(x): if y is not None and not _is_iterable(y): raise ValueError('Both x and y should be iterators for ' 'streaming learning to work.') return StreamingDataFeeder(x, y, n_classes, batch_size) return data_feeder_cls( x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs) def _batch_data(x, batch_size=None): if (batch_size is not None) and (batch_size <= 0): raise ValueError('Invalid batch_size %d.' % batch_size) x_first_el = six.next(x) x = itertools.chain([x_first_el], x) chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False for data in x: if isinstance(data, dict): for k, v in list(data.items()): chunk[k].append(v) if (batch_size is not None) and (len(chunk[k]) >= batch_size): chunk[k] = np.matrix(chunk[k]) chunk_filled = True if chunk_filled: yield chunk chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance( x_first_el, dict) else [] chunk_filled = False else: chunk.append(data) if (batch_size is not None) and (len(chunk) >= batch_size): yield np.matrix(chunk) chunk = [] if isinstance(x_first_el, dict): for k, v in list(data.items()): chunk[k] = np.matrix(chunk[k]) yield chunk else: yield np.matrix(chunk) def setup_predict_data_feeder(x, batch_size=None): """Returns an iterable for feeding into predict step. Args: x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports iterable. batch_size: Size of batches to split data into. If `None`, returns one batch of full size. Returns: List or iterator (or dictionary thereof) of parts of data to predict on. Raises: ValueError: if `batch_size` <= 0. """ if HAS_DASK: x = extract_dask_data(x) if HAS_PANDAS: x = extract_pandas_data(x) if _is_iterable(x): return _batch_data(x, batch_size) if len(x.shape) == 1: x = np.reshape(x, (-1, 1)) if batch_size is not None: if batch_size <= 0: raise ValueError('Invalid batch_size %d.' % batch_size) n_batches = int(math.ceil(float(len(x)) / batch_size)) return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)] return [x] def setup_processor_data_feeder(x): """Sets up processor iterable. Args: x: numpy, pandas or iterable. Returns: Iterable of data to process. """ if HAS_PANDAS: x = extract_pandas_matrix(x) return x def check_array(array, dtype): """Checks array on dtype and converts it if different. Args: array: Input array. dtype: Expected dtype. Returns: Original array or converted. """ # skip check if array is instance of other classes, e.g. 
h5py.Dataset # to avoid copying array and loading whole data into memory if isinstance(array, (np.ndarray, list)): array = np.array(array, dtype=dtype, order=None, copy=False) return array def _access(data, iloc): """Accesses an element from collection, using integer location based indexing. Args: data: array-like. The collection to access iloc: `int` or `list` of `int`s. Location(s) to access in `collection` Returns: The element of `a` found at location(s) `iloc`. """ if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame): return data.iloc[iloc] return data[iloc] def _check_dtype(dtype): if dtypes.as_dtype(dtype) == dtypes.float64: logging.warn( 'float64 is not supported by many models, consider casting to float32.') return dtype class DataFeeder(object): """Data feeder is an example class to sample data for TF trainer.""" def __init__(self, x, y, n_classes, batch_size=None, shuffle=True, random_state=None, epochs=None): """Initializes a DataFeeder instance. Args: x: One feature sample which can either Nd numpy matrix of shape `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix. y: label vector, either floats for regression or class id for classification. If matrix, will consider as a sequence of labels. Can be `None` for unsupervised setting. Also supports dictionary of labels. n_classes: Number of classes, 0 and 1 are considered regression, `None` will pass through the input labels without one-hot conversion. Also, if `y` is `dict`, then `n_classes` must be `dict` such that `n_classes[key] = n_classes for label y[key]`, `None` otherwise. batch_size: Mini-batch size to accumulate samples in one mini batch. shuffle: Whether to shuffle `x`. random_state: Numpy `RandomState` object to reproduce sampling. epochs: Number of times to iterate over input data before raising `StopIteration` exception. Attributes: x: Input features (ndarray or dictionary of ndarrays). y: Input label (ndarray or dictionary of ndarrays). n_classes: Number of classes (if `None`, pass through indices without one-hot conversion). batch_size: Mini-batch size to accumulate. input_shape: Shape of the input (or dictionary of shapes). output_shape: Shape of the output (or dictionary of shapes). input_dtype: DType of input (or dictionary of shapes). output_dtype: DType of output (or dictionary of shapes. """ x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance( y, dict) if isinstance(y, list): y = np.array(y) self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items()) ]) if x_is_dict else check_array(x, x.dtype) self._y = None if y is None else \ dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if x_is_dict else check_array(y, y.dtype) # self.n_classes is not None means we're converting raw target indices to one-hot. if n_classes is not None: if not y_is_dict: y_dtype = (np.int64 if n_classes is not None and n_classes > 1 else np.float32) self._y = (None if y is None else check_array(y, dtype=y_dtype)) self.n_classes = n_classes self.max_epochs = epochs x_shape = dict([(k, v.shape) for k, v in list(self._x.items()) ]) if x_is_dict else self._x.shape y_shape = dict([(k, v.shape) for k, v in list(self._y.items()) ]) if y_is_dict else None if y is None else self._y.shape self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) # Input dtype matches dtype of x. 
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \ else _check_dtype(self._x.dtype) # note: self._output_dtype = np.float32 when y is None self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \ else _check_dtype(self._y.dtype) if y is not None else np.float32 # self.n_classes is None means we're passing in raw target indices if n_classes is not None and y_is_dict: for key in list(n_classes.keys()): if key in self._output_dtype: self._output_dtype[key] = np.float32 self._shuffle = shuffle self.random_state = np.random.RandomState( 42) if random_state is None else random_state num_samples = list(self._x.values())[0].shape[ 0] if x_is_dict else self._x.shape[0] if self._shuffle: self.indices = self.random_state.permutation(num_samples) else: self.indices = np.array(range(num_samples)) self.offset = 0 self.epoch = 0 self._epoch_placeholder = None @property def x(self): return self._x @property def y(self): return self._y @property def shuffle(self): return self._shuffle @property def input_dtype(self): return self._input_dtype @property def output_dtype(self): return self._output_dtype @property def batch_size(self): return self._batch_size def make_epoch_variable(self): """Adds a placeholder variable for the epoch to the graph. Returns: The epoch placeholder. """ self._epoch_placeholder = array_ops.placeholder( dtypes.int32, [1], name='epoch') return self._epoch_placeholder def input_builder(self): """Builds inputs in the graph. Returns: Two placeholders for inputs and outputs. """ def get_placeholder(shape, dtype, name_prepend): if shape is None: return None if isinstance(shape, dict): placeholder = {} for key in list(shape.keys()): placeholder[key] = array_ops.placeholder( dtypes.as_dtype(dtype[key]), [None] + shape[key][1:], name=name_prepend + '_' + key) else: placeholder = array_ops.placeholder( dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend) return placeholder self._input_placeholder = get_placeholder(self.input_shape, self._input_dtype, 'input') self._output_placeholder = get_placeholder(self.output_shape, self._output_dtype, 'output') return self._input_placeholder, self._output_placeholder def set_placeholders(self, input_placeholder, output_placeholder): """Sets placeholders for this data feeder. Args: input_placeholder: Placeholder for `x` variable. Should match shape of the examples in the x dataset. output_placeholder: Placeholder for `y` variable. Should match shape of the examples in the y dataset. Can be `None`. """ self._input_placeholder = input_placeholder self._output_placeholder = output_placeholder def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return { 'epoch': self.epoch, 'offset': self.offset, 'batch_size': self._batch_size } def get_feed_dict_fn(self): """Returns a function that samples data into given placeholders. Returns: A function that when called samples a random subset of batch size from `x` and `y`. """ x_is_dict, y_is_dict = isinstance( self._x, dict), self._y is not None and isinstance(self._y, dict) # Assign input features from random indices. 
def extract(data, indices): return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if len(data.shape) == 1 else _access(data, indices)) # assign labels from random indices def assign_label(data, shape, dtype, n_classes, indices): shape[0] = indices.shape[0] out = np.zeros(shape, dtype=dtype) for i in xrange(out.shape[0]): sample = indices[i] # self.n_classes is None means we're passing in raw target indices if n_classes is None: out[i] = _access(data, sample) else: if n_classes > 1: if len(shape) == 2: out.itemset((i, int(_access(data, sample))), 1.0) else: for idx, value in enumerate(_access(data, sample)): out.itemset(tuple([i, idx, value]), 1.0) else: out[i] = _access(data, sample) return out def _feed_dict_fn(): """Function that samples data into given placeholders.""" if self.max_epochs is not None and self.epoch + 1 > self.max_epochs: raise StopIteration assert self._input_placeholder is not None feed_dict = {} if self._epoch_placeholder is not None: feed_dict[self._epoch_placeholder.name] = [self.epoch] # Take next batch of indices. x_len = list(self._x.values())[0].shape[ 0] if x_is_dict else self._x.shape[0] end = min(x_len, self.offset + self._batch_size) batch_indices = self.indices[self.offset:end] # adding input placeholder feed_dict.update( dict([(self._input_placeholder[k].name, extract(v, batch_indices)) for k, v in list(self._x.items())]) if x_is_dict else {self._input_placeholder.name: extract(self._x, batch_indices)}) # move offset and reset it if necessary self.offset += self._batch_size if self.offset >= x_len: self.indices = self.random_state.permutation( x_len) if self._shuffle else np.array(range(x_len)) self.offset = 0 self.epoch += 1 # return early if there are no labels if self._output_placeholder is None: return feed_dict # adding output placeholders if y_is_dict: for k, v in list(self._y.items()): n_classes = (self.n_classes[k] if k in self.n_classes else None) if self.n_classes is not None else None shape, dtype = self.output_shape[k], self._output_dtype[k] feed_dict.update({ self._output_placeholder[k].name: assign_label(v, shape, dtype, n_classes, batch_indices) }) else: shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes feed_dict.update({ self._output_placeholder.name: assign_label(self._y, shape, dtype, n_classes, batch_indices) }) return feed_dict return _feed_dict_fn class StreamingDataFeeder(DataFeeder): """Data feeder for TF trainer that reads data from iterator. Streaming data feeder allows to read data as it comes it from disk or somewhere else. It's custom to have this iterators rotate infinetly over the dataset, to allow control of how much to learn on the trainer side. """ def __init__(self, x, y, n_classes, batch_size): """Initializes a StreamingDataFeeder instance. Args: x: iterator each element of which returns one feature sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices. y: iterator each element of which returns one label sample. Sample can be a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many classes regression values. n_classes: indicator of how many classes the corresponding label sample has for the purposes of one-hot conversion of label. In case where `y` is a dictionary, `n_classes` must be dictionary (with same keys as `y`) of how many classes there are in each label in `y`. If key is present in `y` and missing in `n_classes`, the value is assumed `None` and no one-hot conversion will be applied to the label with that key. 
batch_size: Mini batch size to accumulate samples in one batch. If set `None`, then assumes that iterator to return already batched element. Attributes: x: input features (or dictionary of input features). y: input label (or dictionary of output features). n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input (can be dictionary depending on `x`). output_shape: shape of the output (can be dictionary depending on `y`). input_dtype: dtype of input (can be dictionary depending on `x`). output_dtype: dtype of output (can be dictionary depending on `y`). """ # pylint: disable=invalid-name,super-init-not-called x_first_el = six.next(x) self._x = itertools.chain([x_first_el], x) if y is not None: y_first_el = six.next(y) self._y = itertools.chain([y_first_el], y) else: y_first_el = None self._y = None self.n_classes = n_classes x_is_dict = isinstance(x_first_el, dict) y_is_dict = y is not None and isinstance(y_first_el, dict) if y_is_dict and n_classes is not None: assert isinstance(n_classes, dict) # extract shapes for first_elements if x_is_dict: x_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())]) else: x_first_el_shape = [1] + list(x_first_el.shape) if y_is_dict: y_first_el_shape = dict( [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())]) elif y is None: y_first_el_shape = None else: y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance( y_first_el, list) else y_first_el.shape)) self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_first_el_shape, y_first_el_shape, n_classes, batch_size) # Input dtype of x_first_el. if x_is_dict: self._input_dtype = dict( [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())]) else: self._input_dtype = _check_dtype(x_first_el.dtype) # Output dtype of y_first_el. def check_y_dtype(el): if isinstance(el, np.ndarray): return el.dtype elif isinstance(el, list): return check_y_dtype(el[0]) else: return _check_dtype(np.dtype(type(el))) # Output types are floats, due to both softmaxes and regression req. if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0: self._output_dtype = np.float32 elif y_is_dict: self._output_dtype = dict( [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())]) elif y is None: self._output_dtype = None else: self._output_dtype = check_y_dtype(y_first_el) def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self): """Returns a function, that will sample data and provide it to placeholders. Returns: A function that when called samples a random subset of batch size from x and y. """ self.stopped = False def _feed_dict_fn(): """Samples data and provides it to placeholders. Returns: `dict` of input and output tensors. 
""" def init_array(shape, dtype): """Initialize array of given shape or dict of shapes and dtype.""" if shape is None: return None elif isinstance(shape, dict): return dict([(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())]) else: return np.zeros(shape, dtype=dtype) def put_data_array(dest, index, source=None, n_classes=None): """Puts data array into container.""" if source is None: dest = dest[:index] elif n_classes is not None and n_classes > 1: if len(self.output_shape) == 2: dest.itemset((index, source), 1.0) else: for idx, value in enumerate(source): dest.itemset(tuple([index, idx, value]), 1.0) else: if len(dest.shape) > 1: dest[index, :] = source else: dest[index] = source[0] if isinstance(source, list) else source return dest def put_data_array_or_dict(holder, index, data=None, n_classes=None): """Puts data array or data dictionary into container.""" if holder is None: return None if isinstance(holder, dict): if data is None: data = {k: None for k in holder.keys()} assert isinstance(data, dict) for k in holder.keys(): num_classes = n_classes[k] if (n_classes is not None and k in n_classes) else None holder[k] = put_data_array(holder[k], index, data[k], num_classes) else: holder = put_data_array(holder, index, data, n_classes) return holder if self.stopped: raise StopIteration inp = init_array(self.input_shape, self._input_dtype) out = init_array(self.output_shape, self._output_dtype) for i in xrange(self._batch_size): # Add handling when queue ends. try: next_inp = six.next(self._x) inp = put_data_array_or_dict(inp, i, next_inp, None) except StopIteration: self.stopped = True if i == 0: raise inp = put_data_array_or_dict(inp, i, None, None) out = put_data_array_or_dict(out, i, None, None) break if self._y is not None: next_out = six.next(self._y) out = put_data_array_or_dict(out, i, next_out, self.n_classes) # creating feed_dict if isinstance(inp, dict): feed_dict = dict([(self._input_placeholder[k].name, inp[k]) for k in list(self._input_placeholder.keys())]) else: feed_dict = {self._input_placeholder.name: inp} if self._y is not None: if isinstance(out, dict): feed_dict.update( dict([(self._output_placeholder[k].name, out[k]) for k in list(self._output_placeholder.keys())])) else: feed_dict.update({self._output_placeholder.name: out}) return feed_dict return _feed_dict_fn class DaskDataFeeder(object): """Data feeder for that reads data from dask.Series and dask.DataFrame. Numpy arrays can be serialized to disk and it's possible to do random seeks into them. DaskDataFeeder will remove requirement to have full dataset in the memory and still do random seeks for sampling of batches. """ def __init__(self, x, y, n_classes, batch_size, shuffle=True, random_state=None, epochs=None): """Initializes a DaskDataFeeder instance. Args: x: iterator that returns for each element, returns features. y: iterator that returns for each element, returns 1 or many classes / regression values. n_classes: indicator of how many classes the label has. batch_size: Mini batch size to accumulate. shuffle: Whether to shuffle the inputs. random_state: random state for RNG. Note that it will mutate so use a int value for this if you want consistent sized batches. epochs: Number of epochs to run. Attributes: x: input features. y: input label. n_classes: number of classes. batch_size: mini batch size to accumulate. input_shape: shape of the input. output_shape: shape of the output. input_dtype: dtype of input. output_dtype: dtype of output. 
Raises: ValueError: if `x` or `y` are `dict`, as they are not supported currently. """ if isinstance(x, dict) or isinstance(y, dict): raise ValueError( 'DaskDataFeeder does not support dictionaries at the moment.') # pylint: disable=invalid-name,super-init-not-called import dask.dataframe as dd # pylint: disable=g-import-not-at-top # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas self._x = x self._y = y # save column names self._x_columns = list(x.columns) if isinstance(y.columns[0], str): self._y_columns = list(y.columns) else: # deal with cases where two DFs have overlapped default numeric colnames self._y_columns = len(self._x_columns) + 1 self._y = self._y.rename(columns={y.columns[0]: self._y_columns}) # TODO(terrytangyuan): deal with unsupervised cases # combine into a data frame self.df = dd.multi.concat([self._x, self._y], axis=1) self.n_classes = n_classes x_count = x.count().compute()[0] x_shape = (x_count, len(self._x.columns)) y_shape = (x_count, len(self._y.columns)) # TODO(terrytangyuan): Add support for shuffle and epochs. self._shuffle = shuffle self.epochs = epochs self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape( x_shape, y_shape, n_classes, batch_size) self.sample_fraction = self._batch_size / float(x_count) self._input_dtype = _check_dtype(self._x.dtypes[0]) self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns]) if random_state is None: self.random_state = 66 else: self.random_state = random_state def get_feed_params(self): """Function returns a `dict` with data feed params while training. Returns: A `dict` with data feed params while training. """ return {'batch_size': self._batch_size} def get_feed_dict_fn(self, input_placeholder, output_placeholder): """Returns a function, that will sample data and provide it to placeholders. Args: input_placeholder: tf.Placeholder for input features mini batch. output_placeholder: tf.Placeholder for output labels. Returns: A function that when called samples a random subset of batch size from x and y. """ def _feed_dict_fn(): """Samples data and provides it to placeholders.""" # TODO(ipolosukhin): option for with/without replacement (dev version of # dask) sample = self.df.random_split( [self.sample_fraction, 1 - self.sample_fraction], random_state=self.random_state) inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist() out = extract_pandas_matrix(sample[0][self._y_columns].compute()) # convert to correct dtype inp = np.array(inp, dtype=self._input_dtype) # one-hot encode out for each class for cross entropy loss if HAS_PANDAS: import pandas as pd # pylint: disable=g-import-not-at-top if not isinstance(out, pd.Series): out = out.flatten() out_max = self._y.max().compute().values[0] encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype) encoded_out[np.arange(out.size), out] = 1 return {input_placeholder.name: inp, output_placeholder.name: encoded_out} return _feed_dict_fn
apache-2.0
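# A minimal, numpy-only sketch (hypothetical helper name and toy data) of the
# one-hot expansion the DataFeeder above performs when `n_classes` is set: the
# class indices selected for a mini-batch are expanded into a
# [batch_size, n_classes] float matrix before being fed to the output placeholder.
import numpy as np

def one_hot_batch(labels, indices, n_classes):
    """Expand the integer class labels at `indices` into a one-hot matrix."""
    out = np.zeros((len(indices), n_classes), dtype=np.float32)
    for row, idx in enumerate(indices):
        out[row, int(labels[idx])] = 1.0
    return out

y = np.array([0, 2, 1, 2, 0])
batch_indices = np.array([1, 3, 4])
print(one_hot_batch(y, batch_indices, n_classes=3))
# [[0. 0. 1.]
#  [0. 0. 1.]
#  [1. 0. 0.]]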
samzhang111/wikipedia-jargon
all-subjects/make_tf_differences.py
1
2815
from __future__ import print_function import msgpack import sys import os from collections import defaultdict from helpers import text_dict_to_term_dict from WikiExtractor import clean, compact import pandas as pd def remove_wikipedia_markup(text): return compact(clean(text.decode('utf8'))) def print_help_and_exit(msg=''): if msg: print('Error: {}\n'.format(msg)) print('Usage: python make_tf_differences.py [n-grams] [path to directory]') print('The directory should contain files output by grab_texts.py') sys.exit(1) if len(sys.argv) <= 2: print_help_and_exit() ############################################################## # Read in msgpack files, separating them from simple and en Wikipedia ############################################################## ngrams = int(sys.argv[1]) text_dir = sys.argv[2] only = sys.argv[3:] print('Only calculating for: ', only) try: files = os.listdir(text_dir) except OSError: print_help_and_exit() ############################################################## # Organize the text files by subject, then wiki (en or simple) ############################################################## file_dict = defaultdict(dict) for f in files: try: subject, wiki, _ = f.split('_') if only and subject not in only: continue file_dict[subject][wiki] = f except ValueError: print_help_and_exit('Text directory does not contain valid filenames') for subject in file_dict: print('Importing ', subject) with open(os.path.join(text_dir, file_dict[subject]['en'])) as f: en_text = msgpack.load(f) en_text = {k: remove_wikipedia_markup(v) for k,v in en_text.items()} with open(os.path.join(text_dir, file_dict[subject]['simple'])) as f: sm_text = msgpack.load(f) sm_text = {k: remove_wikipedia_markup(v) for k,v in sm_text.items()} print('Calculating term differences') en_tf, en_counts = text_dict_to_term_dict(en_text, ngrams) sm_tf, sm_counts = text_dict_to_term_dict(sm_text, ngrams) sm_terms = set(sm_tf) en_terms = set(en_tf) term_differences = {} for t in sm_terms.union(en_terms): term_differences[t] = en_tf[t] - sm_tf[t] sorted_term_difference = sorted(term_differences.items(), key=lambda x: x[1]) print('Outputting term differences') td_df = pd.DataFrame(sorted_term_difference, columns=['term', 'term_difference']) td_df['en_tf'] = td_df.term.apply(lambda x: en_tf[x]) td_df['sm_tf'] = td_df.term.apply(lambda x: sm_tf[x]) try: os.mkdir('data/term-diffs/ngrams-{}'.format(ngrams)) except OSError: pass td_df.to_csv('data/term-diffs/ngrams-{}/{}_td.csv'.format(ngrams, subject), index=False, encoding='utf8')
gpl-3.0
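# A toy sketch of the term-difference computation in make_tf_differences.py above,
# using assumed counts rather than real Wikipedia term frequencies: every term seen
# in either wiki gets its Simple English value subtracted from its English value.
from collections import defaultdict

en_tf = defaultdict(int, {'algorithm': 4, 'set': 2})
sm_tf = defaultdict(int, {'set': 5, 'number': 1})

term_differences = {t: en_tf[t] - sm_tf[t] for t in set(en_tf) | set(sm_tf)}
print(sorted(term_differences.items(), key=lambda x: x[1]))
# [('set', -3), ('number', -1), ('algorithm', 4)]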
spacetelescope/stsci.tools
doc/source/conf.py
1
7012
# -*- coding: utf-8 -*- # # stsci.tools documentation build configuration file, created by # sphinx-quickstart on Thu Oct 7 13:09:39 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from stsci.tools import __version__ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.imgmath', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.autosummary', 'sphinx.ext.doctest'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'stsci.tools' copyright = u'2020, STScI' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = __version__ # The short X.Y version. version = '.'.join(release.split('.')[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. #html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] #html_static_path = ['_static'] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_domain_indices = ['py-modindex'] # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'stsci.toolsdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). #latex_documents = [ # ('index', 'stsci.tools.tex', u'stsci.tools Documentation', # u'SSB', 'manual'), #] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('https://docs.scipy.org/doc/numpy/', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None), 'matplotlib': ('https://matplotlib.org/', (None, 'http://data.astropy.org/intersphinx/matplotlib.inv')), 'astropy': ('https://docs.astropy.org/en/stable/', None) }
bsd-3-clause
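# A tiny standalone check (with an assumed release string) of how the short X.Y
# version is derived from the full release string in the Sphinx configuration above.
release = '3.6.1'
version = '.'.join(release.split('.')[:2])
print(version)  # -> 3.6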
gsmaxwell/phase_offset_rx
gnuradio-core/src/examples/pfb/fmtest.py
17
7785
#!/usr/bin/env python # # Copyright 2009 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, blks2 import sys, math, time try: import scipy from scipy import fftpack except ImportError: print "Error: Program requires scipy (see: www.scipy.org)." sys.exit(1) try: import pylab except ImportError: print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)." sys.exit(1) class fmtx(gr.hier_block2): def __init__(self, lo_freq, audio_rate, if_rate): gr.hier_block2.__init__(self, "build_fm", gr.io_signature(1, 1, gr.sizeof_float), # Input signature gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6) # Local oscillator lo = gr.sig_source_c (if_rate, # sample rate gr.GR_SIN_WAVE, # waveform type lo_freq, #frequency 1.0, # amplitude 0) # DC Offset mixer = gr.multiply_cc () self.connect (self, fmtx, (mixer, 0)) self.connect (lo, (mixer, 1)) self.connect (mixer, self) class fmtest(gr.top_block): def __init__(self): gr.top_block.__init__(self) self._nsamples = 1000000 self._audio_rate = 8000 # Set up N channels with their own baseband and IF frequencies self._N = 5 chspacing = 16000 freq = [10, 20, 30, 40, 50] f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing] self._if_rate = 4*self._N*self._audio_rate # Create a signal source and frequency modulate it self.sum = gr.add_cc () for n in xrange(self._N): sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5) fm = fmtx(f_lo[n], self._audio_rate, self._if_rate) self.connect(sig, fm) self.connect(fm, (self.sum, n)) self.head = gr.head(gr.sizeof_gr_complex, self._nsamples) self.snk_tx = gr.vector_sink_c() self.channel = blks2.channel_model(0.1) self.connect(self.sum, self.head, self.channel, self.snk_tx) # Design the channlizer self._M = 10 bw = chspacing/2.0 t_bw = chspacing/10.0 self._chan_rate = self._if_rate / self._M self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw, attenuation_dB=100, window=gr.firdes.WIN_BLACKMAN_hARRIS) tpc = math.ceil(float(len(self._taps)) / float(self._M)) print "Number of taps: ", len(self._taps) print "Number of channels: ", self._M print "Taps per channel: ", tpc self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps) self.connect(self.channel, self.pfb) # Create a file sink for each of M output channels of the filter and connect it self.fmdet = list() self.squelch = list() self.snks = list() for i in xrange(self._M): self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate)) self.squelch.append(blks2.standard_squelch(self._audio_rate*10)) self.snks.append(gr.vector_sink_f()) self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i]) def num_tx_channels(self): return self._N def num_rx_channels(self): return self._M def main(): fm = fmtest() tstart = 
time.time() fm.run() tend = time.time() if 1: fig1 = pylab.figure(1, figsize=(12,10), facecolor="w") fig2 = pylab.figure(2, figsize=(12,10), facecolor="w") fig3 = pylab.figure(3, figsize=(12,10), facecolor="w") Ns = 10000 Ne = 100000 fftlen = 8192 winfunc = scipy.blackman # Plot transmitted signal fs = fm._if_rate d = fm.snk_tx.data()[Ns:Ns+Ne] sp1_f = fig1.add_subplot(2, 1, 1) X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs, window = lambda d: d*winfunc(fftlen), visible=False) X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X))) f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size)) p1_f = sp1_f.plot(f_in, X_in, "b") sp1_f.set_xlim([min(f_in), max(f_in)+1]) sp1_f.set_ylim([-120.0, 20.0]) sp1_f.set_title("Input Signal", weight="bold") sp1_f.set_xlabel("Frequency (Hz)") sp1_f.set_ylabel("Power (dBW)") Ts = 1.0/fs Tmax = len(d)*Ts t_in = scipy.arange(0, Tmax, Ts) x_in = scipy.array(d) sp1_t = fig1.add_subplot(2, 1, 2) p1_t = sp1_t.plot(t_in, x_in.real, "b-o") #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o") sp1_t.set_ylim([-5, 5]) # Set up the number of rows and columns for plotting the subfigures Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels()))) Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols)) if(fm.num_rx_channels() % Ncols != 0): Nrows += 1 # Plot each of the channels outputs. Frequencies on Figure 2 and # time signals on Figure 3 fs_o = fm._audio_rate for i in xrange(len(fm.snks)): # remove issues with the transients at the beginning # also remove some corruption at the end of the stream # this is a bug, probably due to the corner cases d = fm.snks[i].data()[Ns:Ne] sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i) X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o, window = lambda d: d*winfunc(fftlen), visible=False) #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X))) X_o = 10.0*scipy.log10(abs(X)) #f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size)) f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size)) p2_f = sp2_f.plot(f_o, X_o, "b") sp2_f.set_xlim([min(f_o), max(f_o)+0.1]) sp2_f.set_ylim([-120.0, 20.0]) sp2_f.grid(True) sp2_f.set_title(("Channel %d" % i), weight="bold") sp2_f.set_xlabel("Frequency (kHz)") sp2_f.set_ylabel("Power (dBW)") Ts = 1.0/fs_o Tmax = len(d)*Ts t_o = scipy.arange(0, Tmax, Ts) x_t = scipy.array(d) sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i) p2_t = sp2_t.plot(t_o, x_t.real, "b") p2_t = sp2_t.plot(t_o, x_t.imag, "r") sp2_t.set_xlim([min(t_o), max(t_o)+1]) sp2_t.set_ylim([-1, 1]) sp2_t.set_xlabel("Time (s)") sp2_t.set_ylabel("Amplitude") pylab.show() if __name__ == "__main__": main()
gpl-3.0
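# A standalone sketch of the filter sizing that fmtest.py prints, using only
# scipy/numpy: one prototype low-pass is designed at the full IF rate and its taps
# are shared across the M polyphase branches of the channelizer. The Kaiser design
# below is a stand-in for gr.firdes.low_pass_2, so the exact tap count will differ.
import math
from scipy import signal

M = 10                          # channelizer branches, as in fmtest
audio_rate = 8000
if_rate = 4 * 5 * audio_rate    # 4 * N * audio_rate
chspacing = 16000
bw, t_bw = chspacing / 2.0, chspacing / 10.0

numtaps, beta = signal.kaiserord(100, t_bw / (0.5 * if_rate))
taps = signal.firwin(numtaps, bw / (0.5 * if_rate), window=('kaiser', beta))
print("Number of taps:", len(taps))
print("Taps per channel:", int(math.ceil(len(taps) / float(M))))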
wanggang3333/scikit-learn
examples/model_selection/plot_validation_curve.py
229
1823
""" ========================== Plotting Validation Curves ========================== In this plot you can see the training scores and validation scores of an SVM for different values of the kernel parameter gamma. For very low values of gamma, you can see that both the training score and the validation score are low. This is called underfitting. Medium values of gamma will result in high values for both scores, i.e. the classifier is performing fairly well. If gamma is too high, the classifier will overfit, which means that the training score is good but the validation score is poor. """ print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.svm import SVC from sklearn.learning_curve import validation_curve digits = load_digits() X, y = digits.data, digits.target param_range = np.logspace(-6, -1, 5) train_scores, test_scores = validation_curve( SVC(), X, y, param_name="gamma", param_range=param_range, cv=10, scoring="accuracy", n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.title("Validation Curve with SVM") plt.xlabel("$\gamma$") plt.ylabel("Score") plt.ylim(0.0, 1.1) plt.semilogx(param_range, train_scores_mean, label="Training score", color="r") plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="r") plt.semilogx(param_range, test_scores_mean, label="Cross-validation score", color="g") plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="g") plt.legend(loc="best") plt.show()
bsd-3-clause
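# A short follow-on sketch: once validation_curve has produced test_scores_mean
# (as in the example above), the plotted curves translate into a concrete choice
# of gamma by taking the value with the highest mean cross-validation score. The
# score values below are assumed for illustration.
import numpy as np

param_range = np.logspace(-6, -1, 5)
test_scores_mean = np.array([0.10, 0.35, 0.90, 0.92, 0.75])
best_idx = np.argmax(test_scores_mean)
print("best gamma: %.2e (CV accuracy %.3f)"
      % (param_range[best_idx], test_scores_mean[best_idx]))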
PatrickOReilly/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
67
7474
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
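# A direct, numpy-only check of the bound quoted in the docstring above,
# n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3), matching (up to
# rounding) sklearn's johnson_lindenstrauss_min_dim used in the example.
import numpy as np

def jl_min_dim(n_samples, eps):
    return int(np.ceil(4 * np.log(n_samples) / (eps ** 2 / 2 - eps ** 3 / 3)))

print(jl_min_dim(500, eps=0.1))   # several thousand components, as the Remarks note
print(jl_min_dim(500, eps=0.5))   # a looser distortion budget needs far fewer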
DTUWindEnergy/Python4WindEnergy
lesson 3/results/ebra.py
1
8402
# -*- coding: utf-8 -*- <nbformat>3.0</nbformat> # <headingcell level=1> # Plotting with Matplotlib # <headingcell level=2> # Prepare for action # <codecell> import numpy as np import scipy as sp import sympy # Pylab combines the pyplot functionality (for plotting) with the numpy # functionality (for mathematics and for working with arrays) in a single namespace # aims to provide a closer MATLAB feel (the easy way). Note that his approach # should only be used when doing some interactive quick and dirty data inspection. # DO NOT USE THIS FOR SCRIPTS #from pylab import * # the convienient Matplotib plotting interface pyplot (the tidy/right way) # use this for building scripts. The examples here will all use pyplot. import matplotlib.pyplot as plt # for using the matplotlib API directly (the hard and verbose way) # use this when building applications, and/or backends import matplotlib as mpl # <markdowncell> # How would you like the IPython notebook show your plots? In order to use the # matplotlib IPython magic youre IPython notebook should be launched as # # ipython notebook --matplotlib=inline # # Make plots appear as a pop up window, chose the backend: 'gtk', 'inline', 'osx', 'qt', 'qt4', 'tk', 'wx' # # %matplotlib qt # # or inline the notebook (no panning, zooming through the plot). Not working in IPython 0.x # # %matplotib inline # # <codecell> # activate pop up plots #%matplotlib qt # or change to inline plots # %matplotlib inline # <headingcell level=3> # Matplotlib documentation # <markdowncell> # Finding your own way (aka RTFM). Hint: there is search box available! # # * http://matplotlib.org/contents.html # # The Matplotlib API docs: # # * http://matplotlib.org/api/index.html # # Pyplot, object oriented plotting: # # * http://matplotlib.org/api/pyplot_api.html # * http://matplotlib.org/api/pyplot_summary.html # # Extensive gallery with examples: # # * http://matplotlib.org/gallery.html # <headingcell level=3> # Tutorials for those who want to start playing # <markdowncell> # If reading manuals is too much for you, there is a very good tutorial available here: # # * http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb # # Note that this tutorial uses # # from pylab import * # # which is usually not adviced in more advanced script environments. When using # # import matplotlib.pyplot as plt # # you need to preceed all plotting commands as used in the above tutorial with # # plt. # <markdowncell> # Give me more! # # [EuroScipy 2012 Matlotlib tutorial](http://www.loria.fr/~rougier/teaching/matplotlib/). Note that here the author uses ```from pylab import * ```. When using ```import matplotliblib.pyplot as plt``` the plotting commands need to be proceeded with ```plt.``` # <headingcell level=2> # Plotting template starting point # <codecell> # some sample data x = np.arange(-10,10,0.1) # <markdowncell> # To change the default plot configuration values. # <codecell> page_width_cm = 13 dpi = 200 inch = 2.54 # inch in cm # setting global plot configuration using the RC configuration style plt.rc('font', family='serif') plt.rc('xtick', labelsize=12) # tick labels plt.rc('ytick', labelsize=20) # tick labels plt.rc('axes', labelsize=20) # axes labels # If you don’t need LaTeX, don’t use it. It is slower to plot, and text # looks just fine without. If you need it, e.g. for symbols, then use it. 
#plt.rc('text', usetex=True) #<- P-E: Doesn't work on my Mac # <codecell> # create a figure instance, note that figure size is given in inches! fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8,6)) # set the big title (note aligment relative to figure) fig.suptitle("suptitle 16, figure alignment", fontsize=16) # actual plotting ax.plot(x, x**2, label="label 12") # set axes title (note aligment relative to axes) ax.set_title("title 14, axes alignment", fontsize=14) # axes labels ax.set_xlabel('xlabel 12') ax.set_ylabel(r'$y_{\alpha}$ 12', fontsize=8) # legend ax.legend(fontsize=12, loc="best") # saving the figure in different formats # fig.savefig('figure-%03i.png' % dpi, dpi=dpi) # fig.savefig('figure.svg') # fig.savefig('figure.eps') # <codecell> # following steps are only relevant when using figures as pop up windows (with %matplotlib qt) # to update a figure with has been modified fig.canvas.draw() # show a figure fig.show() # <headingcell level=2> # Exercise # <markdowncell> # The current section is about you trying to figure out how to do several plotting features. You should use the previously mentioned resources to find how to do that. In many cases, google is your friend! # <markdowncell> # * add a grid to the plot # <codecell> plt.plot(x,x**2) plt.grid('on') # <markdowncell> # * change the location of the legend to different places # <codecell> plt.plot(x,x**2, label="label 12") plt.legend(fontsize=12, loc="upper right") # <markdowncell> # * find a way to control the line type and color, marker type and color, control the frequency of the marks (`markevery`). See plot options at: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot # <codecell> stride = max( int(len(x) / 20), 1) plt.plot(x,x**2, 'ko-',color='forestgreen', markevery=stride,label="label 12") plt.legend(fontsize=12, loc="upper center") # <markdowncell> # * add different sub-plots # <codecell> fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True) axes[0].plot(x,x**2) axes[1].plot(x,-x**2) # <markdowncell> # * size the figure such that when included on an A4 page the fonts are given in their true size # <codecell> # matplotlib.rcParams.update({'font.size': 22}) fig, axes = plt.subplots(nrows=2, ncols=1,sharex=True) axes[0].plot(x,x**2) axes[1].plot(x,-x**2) fig.set_size_inches(8.2,3) # using A4 width in inches? 
fig.set_dpi(100) for ax in axes: for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() + ax.get_yticklabels()): item.set_fontsize(12) # ax[0].set('xtick', labelsize=12) # tick labels # .rc('ytick', labelsize=20) # tick labels # .rc('axes', labelsize=20) # axes labels # fig.savefig('figure.pdf') # <markdowncell> # * make a contour plot # <codecell> X, Y = np.meshgrid(x,x) plt.figure() plt.contourf(X,Y,X*Y,linewidth=0.3,cmap=plt.get_cmap('hsv'),levels=np.arange(-1,1,0.1)) plt.show # im=ax.contourf(x,y,ui,levels=np.arange(Umean-5*Ustd,Umean+5*Ustd,Ustd/30),cmap=plt.get_cmap('hsv'),linewidth=0.1) # <markdowncell> # * use twinx() to create a second axis on the right for the second plot # <codecell> plt.figure() ax=plt.gca() ax.plot(x,x**2) ax2 = ax.twinx() ax2.plot(x,x**4, 'r') # <markdowncell> # * add horizontal and vertical lines using axvline(), axhline() # <codecell> plt.figure() plt.plot(x,x**2) plt.axvline(2) plt.axhline(10) # <markdowncell> # * autoformat dates for nice printing on the x-axis using fig.autofmt_xdate() # <codecell> import datetime dates = np.array([datetime.datetime.now() + datetime.timedelta(days=i) for i in xrange(24)]) fig, ax = plt.subplots(nrows=1, ncols=1) ax.plot(dates,xrange(24)) fig.autofmt_xdate() # <headingcell level=2> # Advanced exercises # <markdowncell> # We are going to play a bit with regression # <markdowncell> # * Create a vector x of equally spaced number between $x \in [0, 5\pi]$ of 1000 points (keyword: linspace) # <codecell> n=1000 x=np.linspace(0,5*np.pi,n) # <markdowncell> # * create a vector y, so that y=sin(x) with some random noise # <codecell> y = np.sin(x) +np.random.rand(n)-0.5 yth = np.sin(x) # <markdowncell> # * plot it like this: ![test](files/plt1.png) # <codecell> fig=plt.figure() ax=plt.gca() ax.plot(x,y,'b.') ax.plot(x,yth,'k--',label=r'$y=sin(x)$') # <markdowncell> # Try to do a polynomial fit on y(x) with different polynomial degree (Use numpy.polyfit to obtain coefficients) # # Plot it like this (use np.poly1d(coef)(x) to plot polynomials) ![test](files/plt2.png) # <codecell> for order in xrange(9): coeff=np.polyfit(x,y,order) ax.plot(x,np.poly1d(coeff)(x),label='deg %d'%order) # shrink current axis by 20% box = ax.get_position() ax.set_position([box.x0, box.y0, box.width * 0.8, box.height]) # Put a legend to the right of the current axis ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.show() # <codecell>
apache-2.0
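# A condensed sketch of the final regression exercise above, with an assumed noise
# level: fit polynomials of a few degrees to noisy sin(x) and overlay the fits.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 5 * np.pi, 1000)
y = np.sin(x) + np.random.rand(x.size) - 0.5

fig, ax = plt.subplots()
ax.plot(x, y, 'b.', alpha=0.3)
ax.plot(x, np.sin(x), 'k--', label=r'$y=sin(x)$')
for order in (1, 3, 9):
    coeff = np.polyfit(x, y, order)
    ax.plot(x, np.poly1d(coeff)(x), label='deg %d' % order)
ax.legend(loc='best')
plt.show()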
levelrf/level_basestation
gr-filter/examples/fir_filter_ccc.py
13
3154
#!/usr/bin/env python

from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys

try:
    import scipy
except ImportError:
    print "Error: could not import scipy (http://www.scipy.org/)"
    sys.exit(1)

try:
    import pylab
except ImportError:
    print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
    sys.exit(1)

class example_fir_filter_ccc(gr.top_block):
    def __init__(self, N, fs, bw, tw, atten, D):
        gr.top_block.__init__(self)

        self._nsamps = N
        self._fs = fs
        self._bw = bw
        self._tw = tw
        self._at = atten
        self._decim = D

        taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
        print "Num. Taps: ", len(taps)

        self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
        self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)

        self.filt0 = filter.fir_filter_ccc(self._decim, taps)

        self.vsnk_src = gr.vector_sink_c()
        self.vsnk_out = gr.vector_sink_c()

        self.connect(self.src, self.head, self.vsnk_src)
        self.connect(self.head, self.filt0, self.vsnk_out)

def main():
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    parser.add_option("-N", "--nsamples", type="int", default=10000,
                      help="Number of samples to process [default=%default]")
    parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
                      help="System sample rate [default=%default]")
    parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
                      help="Filter bandwidth [default=%default]")
    parser.add_option("-T", "--transition", type="eng_float", default=100,
                      help="Transition band [default=%default]")
    parser.add_option("-A", "--attenuation", type="eng_float", default=80,
                      help="Stopband attenuation [default=%default]")
    parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
    (options, args) = parser.parse_args()

    put = example_fir_filter_ccc(options.nsamples, options.samplerate,
                                 options.bandwidth, options.transition,
                                 options.attenuation, options.decimation)
    put.run()

    data_src = scipy.array(put.vsnk_src.data())
    data_snk = scipy.array(put.vsnk_out.data())

    # Plot the signals PSDs
    nfft = 1024
    f1 = pylab.figure(1, figsize=(12,10))
    s1 = f1.add_subplot(1,1,1)
    s1.psd(data_src, NFFT=nfft, noverlap=nfft/4, Fs=options.samplerate)
    s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4, Fs=options.samplerate)

    f2 = pylab.figure(2, figsize=(12,10))
    s2 = f2.add_subplot(1,1,1)
    s2.plot(data_src)
    s2.plot(data_snk.real, 'g')

    pylab.show()

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        pass
gpl-3.0
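# A scipy-only sketch of what the flowgraph above does, with no GNU Radio
# dependency, using the example's default parameters: low-pass filter complex
# Gaussian noise with an FIR filter and compare the input/output power spectra.
import numpy as np
from scipy import signal
import pylab

fs, bw, tw = 8000.0, 1000.0, 100.0
numtaps, beta = signal.kaiserord(80, tw / (0.5 * fs))
taps = signal.firwin(numtaps, bw / (0.5 * fs), window=('kaiser', beta))

noise = (np.random.randn(10000) + 1j * np.random.randn(10000)) / np.sqrt(2)
filtered = signal.lfilter(taps, 1.0, noise)

fig = pylab.figure(figsize=(12, 10))
ax = fig.add_subplot(1, 1, 1)
ax.psd(noise, NFFT=1024, noverlap=256, Fs=fs)
ax.psd(filtered, NFFT=1024, noverlap=256, Fs=fs)
pylab.show()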
chungjjang80/FRETBursts
fretbursts/utils/examples/matplotlib_figure_mod_toolbar.py
2
1276
""" Example on how to add widgets the toolbar of a Matplotlib figure using the QT backend. No QT application is created, only the toolbar of the native MPL figure is modified. """ from PySide import QtGui, QtCore import matplotlib def test(): plot([1,2,3], lw=2) q = qt4_interface(gcf()) return q # WARNING: it's paramount to return the object otherwise, with # no references, python deletes it and the GUI doesn't respond! class qt4_interface: def __init__(self,fig): self.fig = fig toolbar = fig.canvas.toolbar self.line_edit = QtGui.QLineEdit() toolbar.addWidget(self.line_edit) self.line_edit.editingFinished.connect(self.do_something) self.spinbox = QtGui.QDoubleSpinBox() toolbar.addWidget(self.spinbox) self.spinbox.valueChanged.connect(self.do_something2) def do_something(self, *args): self.fig.axes[0].set_title(self.line_edit.text()) self.fig.canvas.draw() #f = open('l','a'); f.write('yes\n'); f.flush(); f.close() def do_something2(self, *args): self.fig.axes[0].set_xlim(0, self.spinbox.value()) self.fig.canvas.draw() #f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
gpl-2.0
nagordon/mechpy
mechpy/composites.py
1
71681
# coding: utf-8 ''' Module for composite material analysis Hyer-Stress Analysis of Fiber-Reinforced Composite Materials Herakovich-Mechanics of Fibrous Composites Daniel-Engineering Mechanics of Composite Materials Kollar-Mechanics of COmposite Structures NASA- Basic Mechancis of Lamianted Composites https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf TODO: * transverse shear stress reddy pg 136 or daniel pg 139 * include line loads (Qx,Qy) for combined loading * calculate capability of panel based on margin ''' #============================================================================== # Import Modules #============================================================================== from __future__ import print_function, division __author__ = 'Neal Gordon <nealagordon@gmail.com>' __date__ = '2016-12-02' __version__ = 0.1 from copy import copy from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt, pi from numpy.linalg import solve, inv #from scipy import linalg import numpy as np #np.set_printoptions(suppress=False,precision=2) # suppress scientific notation np.set_printoptions(precision=3, linewidth=200)#, threshold=np.inf) import scipy from scipy.spatial import ConvexHull #np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)}) import pandas as pd import sympy as sp from sympy import Function, dsolve, Eq, Derivative, symbols, pprint from sympy.plotting import plot3d #from sympy import cos, sin #sp.init_printing(use_latex='mathjax') #sp.init_printing(wrap_line=False, pretty_print=True) import matplotlib as mpl mpl.rcParams['figure.figsize'] = (8,5) mpl.rcParams['font.size'] = 12 mpl.rcParams['legend.fontsize'] = 14 import matplotlib.pyplot as plt from matplotlib.pyplot import plot,figure,xlim,ylim,title,legend, \ grid, show, xlabel,ylabel, tight_layout from mpl_toolkits.mplot3d import axes3d # if using ipython console, turn off inline plotting #mpl.use('Qt5Agg') # inline plotting from IPython import get_ipython #get_ipython().magic('matplotlib inline') ###disable inline plotting try: get_ipython().magic('matplotlib') except: pass from IPython.display import display import os plt.close('all') #============================================================================== # Functions #============================================================================== def import_matprops(mymaterial=['T300_5208','AL_7075']): ''' import material properties ''' matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0) if mymaterial==[] or mymaterial=='': print(matprops.columns.tolist()) mat = matprops[mymaterial] #mat.applymap(lambda x:np.float(x)) mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore')) return mat def Sf(E1,E2,nu12,G12): '''transversely isptropic compliance matrix. pg 58 herakovich''' nu21 = E2*nu12/E1 S = array([[1/E1, -nu21/E2, 0], [-nu12/E1, 1/E2, 0], [0, 0, 1/G12]]) return S def S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23): ''' daniel pg 74 transversely isotropic compliance matrix. For transversly isotropic E2=E3, nu12=nu13,G12=G13,G23=E2/(2(1+nu23)) ''' S6 = array( [[ 1/E1, -nu12/E1, -nu12/E1, 0, 0, 0], [-nu12/E1, 1/E2, -nu23/E2, 0, 0, 0], [-nu12/E1, -nu23/E2, 1/E2, 0, 0, 0], [ 0, 0, 0, 1/G23, 0, 0], [ 0, 0, 0, 0, 1/G13, 0], [ 0, 0, 0, 0, 0, 1/G12]]) return S6 def C6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23): ''' daniel pg 74 transversely isotropic stiffness matrix. 
''' C6 = inv(S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23)) return C6 def Qf(E1,E2,nu12,G12): '''transversly isptropic compliance matrix. pg 58 herakovich G12 = E1/(2*(1+nu12)) if isotropic''' nu21 = E2*nu12/E1 Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0], [ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0], [0, 0, G12]]) return Q def T61(th): '''Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax reddy pg 91''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 0, 0, 0, 2*m*n], [n**2, m**2, 0, 0, 0,-2*m*n], [0, 0, 1, 0, 0, 0], [0, 0, 0, m,-n, 0], [0, 0, 0, n, m, 0], [-m*n, m*n, 0, 0, 0,(m**2-n**2)]]) return T1 def T62(th): '''Strain voight notation for strain transform. epsilon1 = T2 @ epsilonx th=ply angle in degrees reddy pg 91 ''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, 0, 0, 0, m*n], [n**2, m**2, 0, 0, 0,-m*n], [0, 0, 1, 0, 0, 0], [0, 0, 0, m,-n, 0], [0, 0, 0, n, m, 0], [-2*m*n, 2*m*n, 0, 0, 0,(m**2-n**2)]]) return T2 def T1(th): '''Stress Transform for Plane Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def T2(th): '''Strain Transform for Plane Stress th=ply angle in degrees voight notation for strain transform. epsilon1 = T2 @ epsilonx''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 def T1s(th): '''Symbolic Stress Transform for Plane Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sp.sin(th*sp.pi/180) m = sp.cos(th*sp.pi/180) T1 = sp.Matrix( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def T2s(th): '''Symbolic Strain Transform for Plane Stress th=ply angle in degrees voight notation for strain transform. 
epsilon1 = T2 @ epsilonx''' n = sp.sin(th*sp.pi/180) m = sp.cos(th*sp.pi/180) T2 = sp.Matrix( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 def failure_envelope(): # failure envelopes # max stress criteria # 1 direction in first row # 2 direction in second row # failure strength in compression #Fc = matrix([[-1250.0, -600.0], # [-200.0, -120.0]]) # ksi # ##failure strength in tension #Ft = matrix([[1500, 1000] # [50, 30]]) # ksi # ##Failure strength in shear #Fs = matrix( [100, 70] ) # Shear Fc1 = [-1250, -600] # Compression 1 direction Fc2 = [-200, -120] # Compression 2 direction Ft1 = [1500, 1000] # Tension 1 direction Ft2 = [50, 30] # Tension 2 direction Fs = [100, 70] # Shear # F1 = Ft(1); # F2 = Ft(1); # F6 = Fs(1); for c in range(2):# mattype factor = 1.25 # right plot( [Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]]) # left plot( [Fc1[c], Fc1[c]] , [Fc2[c], Ft2[c]]) # top plot( [Fc1[c], Ft1[c]] , [Ft2[c], Ft2[c]]) # bottom plot( [Fc1[c], Ft1[c]] , [Fc2[c], Fc2[c]]) # center horizontal plot( [Fc1[c], Ft1[c]] , [0, 0]) # center vertical plot( [0, 0] , [Fc2[c], Ft2[c]]) #xlim([min(Fc1) max(Ft1)]*factor) #ylim([min(Fc2) max(Ft2)]*factor) xlabel('$\sigma_1,ksi$') ylabel('$\sigma_2,ksi$') title('failure envelope with Max-Stress Criteria') def material_plots(materials = ['Carbon_cloth_AGP3705H']): ''' plotting composite properties Sf(E1,E2,nu12,G12) ''' # plt.rcParams['figure.figsize'] = (10, 8) # plt.rcParams['font.size'] = 14 # plt.rcParams['legend.fontsize'] = 14 plyangle = arange(-45, 45.1, 0.1) h = 1 # lamina thickness layupname='[0]' mat = import_matprops(materials) Ex = mat[materials[0]].E1 Ey = mat[materials[0]].E2 nuxy = mat[materials[0]].nu12 Gxy = mat[materials[0]].G12 # layupname = '[0, 45, 45, 0]' # Ex= 2890983.38 # Ey= 2844063.06 # nuxy= 0.27 # Gxy= 1129326.25 # h = 0.0600 plt.close('all') S = Sf(Ex,Ey,nuxy,Gxy) C = inv(S) C11 = [(inv(T1(th)) @ C @ T2(th))[0,0] for th in plyangle] C22 = [(inv(T1(th)) @ C @ T2(th))[1,1] for th in plyangle] C33 = [(inv(T1(th)) @ C @ T2(th))[2,2] for th in plyangle] C12 = [(inv(T1(th)) @ C @ T2(th))[0,1] for th in plyangle] Exbar = zeros(len(plyangle)) Eybar = zeros(len(plyangle)) Gxybar = zeros(len(plyangle)) Q = Qf(Ex,Ey,nuxy,Gxy) Qbar = zeros((len(plyangle),3,3)) for i,th in enumerate(plyangle): Qbar[i] = solve(T1(th), Q) @ T2(th) #Qbar = [solve(T1(th),Q) @ T2(th) for th in plyangle] Qbar11 = Qbar[:,0,0] Qbar22 = Qbar[:,1,1] Qbar66 = Qbar[:,2,2] Qbar12 = Qbar[:,0,1] Qbar16 = Qbar[:,0,2] Qbar26 = Qbar[:,1,2] Aij = Qbar*h # laminate Stiffness # | Exbar Eybar Gxybar | # A = | vxybar vyxbar etasxbar | # | etaxsbar etaysbar etasybar | # laminate Comnpliance aij = zeros((len(plyangle),3,3)) for i, _Aij in enumerate(Aij): aij[i] = inv(_Aij) # material properties for whole laminate (Daniel, pg183) Exbar = [1/(h*_aij[0,0]) for _aij in aij] Eybar = [1/(h*_aij[1,1]) for _aij in aij] Gxybar = [1/(h*_aij[2,2]) for _aij in aij] # Global Stress s_xy = array([[100], [10], [5]]) # local ply stress s_12 = np.zeros((3,len(plyangle))) for i,th in enumerate(plyangle): #s_12[:,i] = np.transpose(T1(th) @ s_xy)[0] # local stresses s_12[:,[i]] = T1(th) @ s_xy # Plotting figure()#, figsize=(10,8)) plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12) legend(['$\overline{C}_{11}$','$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$']) title('Transversly Isotropic Stiffness properties of carbon fiber T300_5208') xlabel("$\Theta$") ylabel('$\overline{C}_{ii}$, ksi') grid() figure()#, figsize=(10,8)) plot(plyangle, Exbar, 
label = r"Modulus: $E_x$") plot(plyangle, Eybar, label = r"Modulus: $E_y$") plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$") title("Constitutive Properties in various angles") xlabel("$\Theta$") ylabel("modulus, psi") legend() grid() figure()#,figsize=(10,8)) plot(plyangle, s_12[0,:], label = '$\sigma_{11},ksi$' ) plot(plyangle, s_12[1,:], label = '$\sigma_{22},ksi$' ) plot(plyangle, s_12[2,:], label = '$\sigma_{12},ksi$' ) legend(loc='lower left') xlabel("$\Theta$") ylabel("Stress, ksi") grid() # plot plyangle as a function of time figure()#,figsize=(10,8)) plot(plyangle,Qbar11, label = "Qbar11") plot(plyangle,Qbar22, label = "Qbar22") plot(plyangle,Qbar66, label = "Qbar66") legend(loc='lower left') xlabel("$\Theta$") ylabel('Q') grid() # plot plyangle as a function of time figure()#,figsize=(10,8)) plot(plyangle,Qbar12, label = "Qbar12") plot(plyangle,Qbar16, label = "Qbar16") plot(plyangle,Qbar26, label = "Qbar26") legend(loc='lower left') xlabel("$\Theta$") ylabel('Q') grid() titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname) #df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar,'Gxybar':Gxybar}) #print(df) #df.to_csv(titlename+'.csv') plt.figure(figsize=(9,6)) plot(plyangle, Exbar, label = r"Modulus: $E_x$") plot(plyangle, Eybar, label = r"Modulus: $E_y$") plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$") title(titlename) xlabel("$\Theta$") ylabel("modulus, psi") legend(loc='best') grid() #plt.savefig(titlename+'.png') show() def laminate_gen(lamthk=1.5, symang=[45,0,90], plyratio=2.0, matrixlayers=False, balancedsymmetric=True): ''' ## function created to quickly create laminates based on given parameters lamthk=1.5 # total #thickness of laminate symang = [45,0,90, 30] #symmertic ply angle plyratio=2.0 # lamina/matrix ratio matrixlayers=False # add matrix layers between lamina plys nonsym=False # symmetric mat = material type, as in different plies, matrix layer, uni tapes, etc #ply ratio can be used to vary the ratio of thickness between a matrix ply and lamina ply. 
if the same thickness is desired, plyratio = 1, if lamina is 2x as thick as matrix plyratio = 2 ''' if matrixlayers: nply = (len(symang)*2+1)*2 nm = nply-len(symang)*2 nf = len(symang)*2 tm = lamthk / (plyratio*nf + nm) tf = tm*plyratio plyangle = zeros(nply//2) mat = 2*ones(nply//2) # orthotropic fiber and matrix = 1, isotropic matrix=2, mat[1:-1:2] = 1 # [2 if x%2 else 1 for x in range(nply//2) ] plyangle[1:-1:2] = symang[:] # make a copy thk = tm*ones(nply//2) thk[2:2:-1] = tf lamang = list(symang) + list(symang[::-1]) plyangle = list(plyangle) + list(plyangle[::-1]) mat = list(mat) + list(mat[::-1]) thk = list(thk) + list(thk[::-1]) else: # no matrix layers, ignore ratio if balancedsymmetric: nply = len(symang)*2 mat = list(3*np.ones(nply)) thk = list(lamthk/nply*np.ones(nply)) lamang = list(symang) + list(symang[::-1]) plyangle = list(symang) + list(symang[::-1]) else: nply = len(symang) mat =[1]*nply thk = list(lamthk/nply*np.ones(nply)) lamang = symang[:] plyangle = symang[:] return thk,plyangle,mat,lamang def make_quasi(n0=4,n45=4): #n0 = 4 #n45 = 13 # #ply0 = [0]*n0 #ply45 = [45]*n45 #plyangle = [] #from itertools import zip_longest #for x,y in zip_longest(ply0,ply45): # if len(plyangle)<min(len(ply0),len(ply45))*2: # plyangle.append(x) # plyangle.append(y) # else: # plyangle.append(x) # plyangle.reverse() # plyangle.append(y) #plyangle = [x for x in plyangle if x is not None] #plyangle ntot = n45+n0 plyangle = [45]*int(n45) for p in [0]*int(n0): plyangle.append(p) plyangle.reverse() return plyangle #@xw.func def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints): ''' code to compute composite properties, applied mechanical and thermal loads and stress and strain inputs NM # force/moments lbs/in ek # strain, curvature in/in q0 = pressure plyangle # angle for each ply plymatindex # material for each ply materials # list materials used, general outline for computing elastic properties of composites 1) Determine engineering properties of unidirectional laminate. 
E1, E2, nu12, G12 2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system 3) Determine Fiber orientation of each ply 4) Calculate the transformed stiffness Qxy in the global coordinate system 5) Determine the through-thicknesses of each ply 6) Determine the laminate stiffness Matrix (ABD) 7) Calculate the laminate compliance matrix by inverting the ABD matrix 8) Calculate the laminate engineering properties # Stress Strain Relationship for a laminate, with Q=reduced stiffness matrix |sx | |Qbar11 Qbar12 Qbar16| |ex +z*kx | |sy |=|Qbar12 Qbar22 Qbar26|=|ey +z*ky | |sxy| |Qbar16 Qbar26 Qbar66| |exy+z*kxy| # Herakovich pg 84 Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2 transformation reminders - see Herakovich for details sig1 = T1*sigx sigx = inv(T1)*sig1 eps1 = T2*epsx epsx = inv(T2)*epsx sigx = inv(T1)*Q*T2*epsx Qbar = inv(T1)*Q*T2 Sbar = inv(T2)*inv(Q)*T2 Notes, core transverse direction is G13, ribbon direction is G23 a_width = 50 # plate width (inches or meters) b_length = 50 # laminate length, inches or meters ''' #========================================================================== # Initialize python settings #========================================================================== #get_ipython().magic('matplotlib') plt.close('all') plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 13 #plt.rcParams['legend.fontsize'] = 14 #========================================================================== # Define composite properties #========================================================================== assert(len(plyangle)==len(plymatindex)) a_width, b_length = platedim # either apply strains or loads , lb/in Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]]) ex_, ey_, exy_, kx_, ky_, kxy_ = ek epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]]) Ti = 0 # initial temperature (C) Tf = 0 # final temperature (C) #SF = 1.0 # safety factor #========================================================================== # Import Material Properties #========================================================================== mat = import_matprops(materials) #mat = import_matprops(['E-Glass Epoxy cloth','rohacell2lb']) # Herakovich alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]]) ''' to get ply material info, use as follows alpha = alphaf(mat[materials[plymatindex[i]]]) mat[materials[1]].E2 ''' laminatethk = array([mat[materials[i]].plythk for i in plymatindex ]) nply = len(laminatethk) # number of plies H = np.sum(laminatethk) # plate thickness # area = a_width*H z = zeros(nply+1) zmid = zeros(nply) z[0] = -H/2 for i in range(nply): z[i+1] = z[i] + laminatethk[i] zmid[i] = z[i] + laminatethk[i]/2 #========================================================================== # ABD Matrix Compute #========================================================================== # Reduced stiffness matrix for a plane stress ply in principal coordinates # calcluating Q from the Compliance matrix may cause cancE1ation errors A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3)) for i in range(nply): # = nply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) A += Qbar*(z[i+1]-z[i]) # coupling stiffness B += (1/2)*Qbar*(z[i+1]**2-z[i]**2) # bending or flexural laminate stiffness relating moments to 
curvatures D += (1/3)*Qbar*(z[i+1]**3-z[i]**3) #Cbar6 = T61 @ C6 @ np.transpose(T61) # laminate stiffness matrix ABD = zeros((6,6)) ABD[0:3,0:3] = A ABD[0:3,3:6] = B + zoffset*A ABD[3:6,0:3] = B + zoffset*A ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A # laminatee compliance abcd = inv(ABD) a = abcd[0:3,0:3] #========================================================================== # Laminate Properties #========================================================================== # effective laminate shear coupling coefficients etasxbar = a[0,2]/a[2,2] etasybar = a[1,2]/a[2,2] etaxsbar = a[2,0]/a[0,0] etaysbar = a[2,1]/a[1,1] # laminate engineer properties Exbar = 1 / (H*a[0,0]) Eybar = 1 / (H*a[1,1]) Gxybar = 1 / (H*a[2,2]) nuxybar = -a[0,1]/a[0,0] nuyxbar = -a[0,1]/a[1,1] # TODO: validate results, does not appear to be correct # strain centers, pg 72, NASA-Basic mechanics of lamianted composites # added divide by zero epsilon z_eps0_x = -B[0,0] / (D[0,0] + 1e-16) z_eps0_y = -B[0,1] / (D[0,1] + 1e-16) z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16) z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center # --------------------- Double Check --------------------- # # Laminate compliance matrix # LamComp = array([ [1/Exbar, -nuyxbar/Eybar, etasxbar/Gxybar], # [-nuxybar/Exbar, 1/Eybar , etasybar/Gxybar], # [etaxsbar/Exbar, etaysbar/Eybar, 1/Gxybar]] ) # # Daniel pg 183 # # combines applied loads and applied strains # strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3] # Nxyz = A @ strain_laminate # stress_laminate = Nxyz/H # -------------------------------------------------------- #========================================================================== # Pressure Load #========================================================================== #========================================================================== # pressure displacement and moments #========================================================================== D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2] B11 = B[0,0] A11, A12 = A[0,0], A[0,1] # reddy pg 247 Navier displacement solution for a simply supported plate s = b_length/a_width x = a_width/2 y = b_length/2 # 5.2.8, reddy, or hyer 13.123 terms = 5 w0 = 0 for m in range(1,terms,2): for n in range(1,terms,2): dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4) alpha = m*pi/a_width beta = n*pi/b_length # for uniformly distributed loads, m,n = 1,3,5,... Qmn = 16*q0/(pi**2*m*n) Wmn = Qmn/dmn w0 += Wmn * sin(alpha*x) * sin(beta*y) w0_simplesupport = w0 # 5.2.12a, reddy # mid span moments Mxq=Myq=Mxyq=0 for m in range(1,terms,2): for n in range(1,terms,2): dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4) alpha = m*pi/a_width beta = n*pi/b_length # for uniformly distributed loads, m,n = 1,3,5,... 
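            # Annotation (added): this block implements the Navier double-sine
            # series for a simply supported, specially orthotropic plate under
            # uniform pressure q0 (Reddy 5.2), with s = b/a and m,n odd:
            #   w0(x,y) = sum_m sum_n Wmn * sin(m*pi*x/a) * sin(n*pi*y/b)
            #   Qmn     = 16*q0 / (pi**2 * m * n)
            #   Wmn     = Qmn / dmn
            #   dmn     = (pi**4/b**4) * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4)
            # The mid-span moments accumulated below follow from
            #   Mx = -(D11*w0,xx + D12*w0,yy),  My = -(D12*w0,xx + D22*w0,yy)
            # evaluated term by term.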
Qmn = 16*q0/(pi**2*m*n) Wmn = Qmn/dmn Mxq += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length) Myq += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length) Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length) Mxyq = -2*Mxyq NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]] # hyer, x-pin-pin, y-free-free plate reaction forces, pg 619 # Forces and Moments across the width of the plate A11R = A11*(1-B11**2/(A11*D11)) D11R = D11*(1-B11**2/(A11*D11)) Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12 Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2) Nxyq0 = lambda x: 0 Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2) Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2)) Mxyq0 = lambda x: 0 # clamped plate 5.4.11, reddy #w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) ) # reddy, 5.4.12 w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4) # reddy, 5.4.15 #w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4) # reddy 5.4.15, for isotropic D11=D w0_clamped_isotropic = 0.00134*q0*a_width**4/D11 #========================================================================== # Applied Loads and pressure loads #========================================================================== NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp #========================================================================== # Thermal Loads #========================================================================== ''' if the material is isotropic and unconstrained, then no thermal stresses will be experienced. If there are constraints, then the material will experience thermally induced stresses. As with orthotropic materials, various directions will have different stresses, and when stacked in various orientations, stresses can be unintuitive and complicated. Global Thermal strains are subtracted from applied strains # 1) determine the free unrestrained thermal strains in each layer, alphabar ''' dT = Tf-Ti Nhatth= zeros((3,1)) # unit thermal force in global CS Mhatth = zeros((3,1)) # unit thermal moment in global CS alphabar = zeros((3,nply)) # global ply CTE for i in range(nply): # = nply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) alpha = alphaf(mat[materials[plymatindex[i]]]) Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) alphabar[:,[i]] = solve(T2(plyangle[i]), alpha) #alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2) NMhatth = np.vstack((Nhatth,Mhatth)) NMbarth = NMhatth*dT # resultant thermal loads # Laminate CTE epsilonhatth = abcd@NMhatth # laminate CTE # applied loads and thermal loads epsilonbarapp = abcd @ NMbarapptotal epsilonbarth = abcd @ NMbarth # resultant thermal strains epsilonbartotal = epsilonbarapp + epsilonbarth # Composite respone from applied mechanical loads and strains. Average # properties only. Used to compare results from tensile test. 
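    # Annotation (added): the thermal terms above follow the unit thermal load
    # approach (Hyer): with alphabar_k the ply CTEs rotated to laminate axes,
    #   Nhat_th = sum_k Qbar_k @ alphabar_k * (z[k+1] - z[k])
    #   Mhat_th = 0.5 * sum_k Qbar_k @ alphabar_k * (z[k+1]**2 - z[k]**2)
    # so NMbarth = [Nhat_th; Mhat_th]*dT are the thermal resultants, and
    # epsilonhatth = abcd @ NMhatth is the laminate CTE vector (strain per degree).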
#epsilon_laminate = abcd@NMbarapptotal #sigma_laminate = ABD@epsilon_laminate/H epsilon_laminate = epsilonbartotal[:] sigma_laminate = ABD@epsilonbartotal/H alpha_laminate = a@Nhatth # determine thermal load and applied loads or strains Hyer pg 435,452 Nx = NMbarapptotal[0,0]*a_width # units kiloNewtons, total load as would be applied in a tensile test Ny = NMbarapptotal[1,0]*b_length # units kN #========================================================================== # Thermal and mechanical local and global stresses at the ply interface #========================================================================== # Declare variables for plotting epsilon_app = zeros((3,2*nply)) sigma_app = zeros((3,2*nply)) epsilonbar_app = zeros((3,2*nply)) sigmabar_app = zeros((3,2*nply)) epsilon_th = zeros((3,2*nply)) sigma_th = zeros((3,2*nply)) epsilonbar_th = zeros((3,2*nply)) sigmabar_th = zeros((3,2*nply)) epsilon = zeros((3,2*nply)) epsilonbar = zeros((3,2*nply)) sigma = zeros((3,2*nply)) sigmabar = zeros((3,2*nply)) for i,k in enumerate(range(0,2*nply,2)): # stress is calcuated at top and bottom of each ply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) ### transverse shear, herakovich pg 254 #Q44 = mat[materials[plymatindex[i]]].G23 #Q55 = mat[materials[plymatindex[i]]].G13 #Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2 #Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2 #Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i]) #epsilontransverse = array([[gammayz],[gammaxz]]) #sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse # Global stresses and strains, applied load only epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7] epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7] sigbarapp1 = Qbar @ epsbarapp1 sigbarapp2 = Qbar @ epsbarapp2 # Local stresses and strains, appplied load only epsapp1 = T2(plyangle[i]) @ epsbarapp1 epsapp2 = T2(plyangle[i]) @ epsbarapp2 sigapp1 = Q @ epsapp1 sigapp2 = Q @ epsapp2 # Interface Stresses and Strains epsilon_app[:,k:k+2] = np.column_stack((epsapp1,epsapp2)) epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2)) sigma_app[:,k:k+2] = np.column_stack((sigapp1,sigapp2)) sigmabar_app[:,k:k+2] = np.column_stack((sigbarapp1,sigbarapp2)) # Global stress and strains, thermal loading only epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7] - dT*alphabar[:,[i]] epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]] sigbarth1 = Qbar @ epsbarth1 sigbarth2 = Qbar @ epsbarth2 # Local stress and strains, thermal loading only epsth1 = T2(plyangle[i]) @ epsbarth1 epsth2 = T2(plyangle[i]) @ epsbarth2 sigth1 = Q @ epsth1 sigth2 = Q @ epsth2 # Interface Stresses and Strains epsilon_th[:,k:k+2] = np.column_stack((epsth1,epsth2)) epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. 
only use local thermal strains for calculating stress sigma_th[:,k:k+2] = np.column_stack((sigth1,sigth2)) sigmabar_th[:,k:k+2] = np.column_stack((sigbarth1,sigbarth2)) # TOTAL global stresses and strains, applied and thermal epsbar1 = epsbarapp1 + epsbarth1 epsbar2 = epsbarapp2 + epsbarth2 sigbar1 = Qbar @ epsbar1 sigbar2 = Qbar @ epsbar2 # TOTAL local stresses and strains , applied and thermal eps1 = T2(plyangle[i]) @ epsbar1 eps2 = T2(plyangle[i]) @ epsbar2 sig1 = Q @ eps1 sig2 = Q @ eps2 # Interface Stresses and Strains epsilon[:,k:k+2] = np.column_stack((eps1,eps2)) epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress sigma[:,k:k+2] = np.column_stack((sig1,sig2)) sigmabar[:,k:k+2] = np.column_stack((sigbar1,sigbar2)) #========================================================================== # Strength Failure Calculations #========================================================================== # Strength Ratio STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply)) # Failure Index FAILUREINDEX_MAXSTRESS = zeros((3,2*nply)) STRENGTHRATIO_TSAIWU = zeros((nply)) for i,k in enumerate(range(0,2*nply,2)): # stress s1 = sigma[0,k] s2 = sigma[1,k] s12 = np.abs(sigma[2,k]) # strength F1 = mat[materials[plymatindex[i]]].F1t if s1 > 0 else mat[materials[plymatindex[i]]].F1c F2 = mat[materials[plymatindex[i]]].F2t if s2 > 0 else mat[materials[plymatindex[i]]].F2c F12 = mat[materials[plymatindex[i]]].F12 # Max Stress failure index ,failure if > 1, then fail, FI = 1/SR FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1 / F1 FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2 / F2 FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12 # Tsai Wu, failure occures when > 1 F1t = mat[materials[plymatindex[i]]].F1t F1c = mat[materials[plymatindex[i]]].F1c F2t = mat[materials[plymatindex[i]]].F2t F2c = mat[materials[plymatindex[i]]].F2c F12 = mat[materials[plymatindex[i]]].F12 # inhomogeneous Tsai-Wu criterion # from Daniel # http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf f1 = 1/F1t + 1/F1c f2 = 1/F2t + 1/F2c f11 = -1/(F1t*F1c) f22 = -1/(F2t*F2c) f66 = 1/F12**2 f12 = -0.5*sqrt(f11*f22) #TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 # polynomial to solve. 
Added a machine epsilon to avoid divide by zero errors lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16 lam2 = f1*s1 + f2*s2 + 1e-16 lam3 = -1 # smallest positive root roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) , (-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] ) STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio # f1 = 1/F1t - 1/F1c # f2 = 1/F2t - 1/F2c # f11 = 1/(F1t*F1c) # f22 = 1/(F2t*F2c) # f66 = 1/F12**2 # STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2))) ### Apply safety factors FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF ### MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1 # margin of safety # strength ratio for max stress, if < 1, then fail, SR = 1/FI STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16) # margin of safety based on max stress criteria MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1 # minimum margin of safety for Max stress failure MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min() FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max() # minimum margin of safety of both Tsai-Wu and Max Stress #MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() ) # find critial values for all failure criteria #MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf #MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf #========================================================================== # Buckling Failure Calculations #========================================================================== ''' Buckling of Clamped plates under shear load, reddy, 5.6.17''' k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4 k12 = 23.107/(a_width*b_length) k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4 Nxycrit0 = 1/k12*np.sqrt(k11*k22) FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0 # failure if > 1 MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1 '''Kassapoglous pg 126,137 simply supported plate buckling, assumes Nx>0 is compression Nxcrit0 is the axial load that causes buckling Nxycrit0 is the shear load that cause buckling Nxcrit is the axial load part of a combined load that causes buckling Nxycrit is the shear load part of a combined load that causes buckling ''' # no buckling issues if Nx is positive # buckling calcuations assumes Nx compression is positive. 
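    # Annotation (added): the simply supported buckling allowables computed
    # below are the classical orthotropic-plate results (see the Kassapoglou
    # reference cited above):
    #   Nx_crit0  = (pi**2/a**2) * (D11 + 2*(D12 + 2*D66)*(a/b)**2 + D22*(a/b)**4)
    #   Nxy_crit0 = (9*pi**4*b/(32*a**3)) * (D11 + 2*(D12 + 2*D66)*(a/b)**2 + D22*(a/b)**4)
    # For combined Nx/Nxy the interaction solution below scales these via k = Nxy/Nx.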
Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0) Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok # Nxy=0 Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4) # Nx=0 Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4) FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0 if Nx__ == 0 or Nxy__ == 0: FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0 FI_Nx0_buckling = (Nx__*SF)/Nxcrit0 else: # interaction term k = Nxy__ / Nx__ Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) , abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ) Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0) # interactive calc FI_Nx_buckling = (Nx__ *SF)/Nxcrit FI_Nxy_buckling = (Nxy__*SF)/Nxycrit FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling] ) MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1 #========================================================================== # Facesheet Wrinkling #========================================================================== #========================================================================== # principal lamainte stresses #========================================================================== sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0], [sigma_laminate[2,0],sigma_laminate[1,0],0], [0,0,0]]))[0] tauxy_p = sigma_laminate[2,0] sigmax_p = sigma_laminate[0,0] sigmay_p = sigma_laminate[1,0] thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi #========================================================================== # Printing Results #========================================================================== if prints: print('--------------- laminate1 Stress analysis of fibers----------') print('(z-) plyangles (z+)'); print(plyangle) print('(z-) plymatindex (z+)'); print(plymatindex) print('ply layers') ; print(z) print('lamiante thickness, H = {:.4f}'.format(H)) #print('x- zero strain laminate center, z_eps0_x = {:.4f}'.format(z_eps0_x)) #print('y- zero strain laminate center, z_eps0_y = {:.4f}'.format(z_eps0_y)) #print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy)) #print('shear center laminate center, z_sc = {:.4f}'.format(z_sc)) print('Applied Loads'); print(NM) print('ABD=');print(ABD) print('Ex= {:.2f}'.format(Exbar) ) print('Ey= {:.2f}'.format(Eybar) ) print('nuxy= {:.2f}'.format(nuxybar) ) print('Gxy= {:.2f}'.format(Gxybar) ) print('epsilon_laminate') ; print(epsilon_laminate) print('sigma_laminate') ; print(sigma_laminate) print('sigma_principal_laminate') ; print(sigma_principal_laminate) print('principal_angle = {:.2f} deg'.format(thetap)) print('NMbarapp') ; print(NMbarapp) print('sigma') ; print(sigma) print('\nMax Stress Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) ) print(MARGINSAFETY_MAXSTRESS) print('\nTsai-Wu Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min())) 
print(MARGINSAFETY_TSAIWU) print('\nmaximum failure index = {:.4f}'.format( FAILUREINDEX_MAXSTRESS_max )) print(FAILUREINDEX_MAXSTRESS) print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling)) # print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----') # print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) ) # print('FI_Nx0 = {:.2f}'.format(FI_Nx0_buckling) ) # print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----') # print('FI_Nx = {:.2f}'.format(FI_Nx_buckling) ) # print('FI_Nxy = {:.2f}'.format(FI_Nxy_buckling) ) # print('---- Buckling Failure Index (fail>1) combined loads and simple support -----') # print(FI_combinedload_simplesupport_buckle) print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling))) print('Mx_midspan = {:.2f}'.format(Mxq) ) print('My_midspan = {:.2f}'.format(Myq) ) print('Mxy_midspan = {:.2f}'.format(Mxyq) ) print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) ) print('w0_clamped = {:.6f}'.format(w0_clamped) ) print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) ) #display(sp.Matrix(sigmabar)) #========================================================================== # Plotting #========================================================================== if plots: windowwidth = 800 windowheight = 450 zplot = zeros(2*nply) for i,k in enumerate(range(0,2*nply,2)): # = nply zplot[k:k+2] = z[i:i+2] #legendlab = ['total','thermal','applied','laminate'] # global stresses and strains mylw = 1.5 #linewidth # Global Stresses and Strains f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True) f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle)) stresslabel = ['$\sigma_x$','$\sigma_y$','$\\tau_{xy}$'] strainlabel = ['$\epsilon_x$','$\epsilon_y$','$\gamma_{xy}$'] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(strainlabel[i]) ax.set_title(' Ply Strain '+strainlabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(epsilonbar[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(epsilonbar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75, linestyle='--', label='thermal') ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) #ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6)) for i,ax in enumerate([ax4,ax5,ax6]): ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) ax.set_title(' Ply Stress '+stresslabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2)) ax.plot(sigmabar[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(sigmabar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(25,50,windowwidth,windowheight) except: pass f1.show() #plt.savefig('global-stresses-strains.png') ### Local Stresses and Strains f2, 
((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True) f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle)) stresslabel = ['$\sigma_1$','$\sigma_2$','$\\tau_{12}$'] strainlabel = ['$\epsilon_1$','$\epsilon_2$','$\gamma_{12}$'] strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] , [ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] , [ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(strainlabel[i]) ax.set_title(' Ply Strain '+strainlabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(epsilon[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(epsilon_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) for i,ax in enumerate([ax4,ax5,ax6]): ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) ax.set_title(' Ply Stress '+stresslabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2)) ax.plot(sigma[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(sigma_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ### plots strengths #ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw) ax.grid(True) leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight) except: pass f2.show() #plt.savefig('local-stresses-strains.png') ### Failure f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True) f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle)) stresslabel = ['$\sigma_1/F_1$','$\sigma_2/F_2$','$\\tau_{12}/F_{12}$'] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) #ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100)) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total') ax.grid(True) ax.set_title('Failure Index, fail if > 1') #leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight) except: pass f2.show() #plt.savefig('local-stresses-strains.png') ### warpage res = 100 Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res)) epsx = epsilon_laminate[0,0] epsy = epsilon_laminate[1,0] epsxy = epsilon_laminate[2,0] kapx = epsilon_laminate[3,0] kapy = epsilon_laminate[4,0] kapxy = epsilon_laminate[5,0] ### dispalcement w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt) u = epsx*Xplt # pg 451 hyer fig = plt.figure('plate-warpage') ax = fig.gca(projection='3d') ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3) 
###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10]) ax.set_xlabel('plate width,y-direction,in') ax.set_ylabel('plate length,x-direction, in') ax.set_zlabel('warpage,in') #ax.set_zlim(-0.01, 0.04) #mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450) try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight) except: pass plt.show() #plt.savefig('plate-warpage') return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max def plate(): ''' composite plate mechanics TODO - results need vetted ''' #========================================================================== # Initialize #========================================================================== get_ipython().magic('matplotlib') plt.close('all') plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 13 #plt.rcParams['legend.fontsize'] = 14 #========================================================================== # Import Material Properties #========================================================================== plythk = 0.0025 plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply nply = len(plyangle) # number of plies laminatethk = np.zeros(nply) + plythk H = sum(laminatethk) # plate thickness # Create z dimensions of laminate z_ = np.linspace(-H/2, H/2, nply+1) a = 20 # plate width; b = 10 # plate height q0_ = 5.7 # plate load; # Transversly isotropic material properties E1 = 150e9 E2 = 12.1e9 nu12 = 0.248 G12 = 4.4e9 nu23 = 0.458 G23 = E2 / (2*(1+nu23)) # Failure Strengths F1t = 1500e6 F1c = -1250e6 F2t = 50e6 F2c = -200e6 F12t = 100e6 F12c = -100e6 Strength = np.array([[F1t, F1c], [F2t, F2c], [F12t, F12c]]) th = sp.symbols('th') # Stiffnes matrix in material coordinates Cijm6 = inv(Sij6) # reduced stiffness in structural Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0], [Cij6[0,1], Cij6[1,1], 0], [0, 0, Cij6[5,5] ]] ) Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)], [sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)], [-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]]) ## Cylindrical Bending of a laminated plate # displacement in w (z direction) from sympy.abc import x f = Function('f') eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False) pprint(eq) #============================================================================== th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16') wfun = Function('wfun') ufun = Function('ufun') ## EQ 4.4.1a eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3) #eq1 = A11*diff(ufun,x,2) - B11*diff(wfun,x,3); # C5 C1 ## EQ 4.4.1b #eq2 = A16*diff(ufun,x,2) - B16*diff(wfun,x,3); # C5 C1 eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3) ## EQ 4.4.1c #eq3 = B11*diff(ufun,x,3) - D11*diff(wfun,x,4) + q0; eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0 ################## python conversion eded here ################################ # solve eq1 eq2 and eq3 to get the w and u functions # displacement in w (z direction) from eq1,eq2,eq3 wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4 # displacement in u (x direction) from eq1,eq2,eq3 ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7 # Cij6.evalf(subs={th:plyangle[i]}) * (z_[i+1]**3-z_[i]**3) # cond1 -> w(0)=0 at x(0), roller C1sol = 
sp.solve(wfun.subs(x,0), C1)[0] # = 0 # cond2 -> angle at dw/dx at x(0) is 0, cantilever C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0 # cond3 -> w(z) = 0 at x(a), roller C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3 # cond4 u = 0 at x = 0 C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0 # u=0 at x = a C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6 # cond 5 EQ 4.4.14a Myy = 0 @ x(a) (Mxx , B11 D11) (Myy, B12 D12) roller no moment C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3 # EQ 4.4.13a, Nxx = 0 @ x(0) roller has no Nxx C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3 C3sol = sp.solve(C6sol1 - C6sol2,C3)[0] C4sol = C4sol1.subs(C3,C3sol) C6sol = sp.simplify(C6sol2.subs(C3,C3sol)) C5sol = sp.simplify(C5sol1.subs(C6,C6sol)) # substitute integration constants with actual values( _ is actual number) C1_ = copy(C1sol) C2_ = copy(C2sol) C7_ = copy(C7sol) C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # function w(x) vertical displacement w along z with actual vaules wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # function u(x) horizontal displacement u along x with actual vaules usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # 3d plots plot3d(wsol,(x,0,a), (y,0,b)) plt.xlabel('x') plt.ylabel('y') plt.title('Cylindrical Bending -Displacement of a plate With CLPT') ## Strain calculation # eq 3.3.8 (pg 116 reddy (pdf = 138)) epstotal = array([[usol.diff(x) + 0.5* wsol.diff(x)**5 - z*wsol.diff(x,2)],[0],[0]]) epsx = epstotal[0,0] ## Calculating and plotting Stress in each layer res = 8 # accuracy of finding max and min stress xplot = linspace(0,a,res) yplot = linspace(0,b,res) G0 = sp.symbols('G0') Globalminstress = np.zeros((3, nply)) Globalmaxstress = np.zeros((3, nply)) for kstress in range(3): # stress state s_x, s_y, s_xz plt.figure(kstress+1) for klay in range(nply): # loop through all layers thplot = plyangle[klay] zplot = linspace(z_[klay],z_[klay+1],res) stressplot = np.zeros((len(zplot),len(xplot))) ## Calc Stresses if kstress == 2: # Shear stresses G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0 # solve for shear stresses from s_1 s_xz = sp.solve(G0_,G0)[0] # out of plane shear S_xz does not need to be transformed ?? 
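                # Annotation (added): the conventional recovery of interlaminar
                # shear (see the module TODO: Reddy pg 136 / Daniel pg 139) is to
                # integrate in-plane equilibrium through the thickness,
                #   d(sigma_x)/dx + d(tau_xz)/dz = 0  =>  tau_xz(z) = -integral(d(sigma_x)/dx, z) + C,
                # with C fixed by the traction-free top/bottom surfaces
                # (the open question flagged by the '??' above).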
plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) ) else: # normal stresses # Cij = reduced structural stiffness in strictural coordinates 3x3 # stress in structural coordinates s_stress = Cij.subs(th,thplot) @ epstotal # stressin material coordinates m_stress = Tij.subs(th,thplot) @ s_stress #ezsurf(m_stress(kstress),[0,a,z_(klay),z_(klay+1)]) ## find max stress in each layer ii=0 for i in xplot: jj=0 for j in zplot: if kstress == 2: stressplot[ii,jj] = s_xz.subs({x:i, z:j}) else: stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j}) jj+=jj ii+=ii Globalminstress[kstress,klay] = np.min(stressplot) Globalmaxstress[kstress,klay] = np.max(stressplot) # plt.title('\sigma_%i' % kstress) ## Plot max stress and failure strength plt.figure() for i in range(3): plt.subplot(1, 3, i+1) plt.bar(range(nply), Globalmaxstress[i,:]) plt.bar(range(nply), Globalminstress[i,:]) plt.scatter(range(nply),np.ones(nply) * Strength[i,0]) plt.scatter(range(nply),np.ones(nply) * Strength[i,1]) plt.xlabel('layer') plt.title('\sigma%i' % i) def plate_navier(): ''' composite plate bending with navier solution TODO - code needs to be converted from matlab ''' ## Plate a*b*h simply supported under q = q0 CLPT pass ''' q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y') Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b)) dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4) Wmn = Qmn/dmn; w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b); w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] ); figure w0sum = 0; for n_ = 1:10 for m_ = 1:10 w0sum = w0sum + subs(w0_,[n m],[n_ m_]); end end w0sum; % xplot = linspace(0,a_,res); % yplot = linspace(0,b_,res); ii=1; for i = xplot jj=1; for j = yplot w0plot(ii,jj) = subs(w0sum,[x y],[i j]); jj=jj+1; end ii=ii+1; end surf(xplot,yplot,w0plot) colorbar set(gca,'PlotBoxAspectRatio',[2 1 1]); xlabel('length a, u(x)') ylabel('length b, v(y)') zlabel('w(z)') ''' class laminate(object): """ IN-WORK - laminate object for composite material analysis """ # constructor def __init__(self, plyangle, matindex, matname): # run when laminate is instantiated # loads materials used self.plyangle = plyangle self.matindex = matindex self.matname = matname self.__mat = self.__import_matprops(matname) # create a simple function to handle CTE properties def __alphaf(self, mat): return array([[mat.alpha1], [mat.alpha2], [0]]) self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ]) self.nply = len(self.laminatethk) # number of plies self.H = np.sum(self.laminatethk) # plate thickness # area = a_width*H z = zeros(self.nply+1) zmid = zeros(self.nply) z[0] = -self.H/2 for i in range(self.nply): z[i+1] = z[i] + self.laminatethk[i] zmid[i] = z[i] + self.laminatethk[i]/2 self.z = z self.zmid = zmid self.__abdmatrix() def __Qf(self, E1,E2,nu12,G12): '''transversly isptropic compliance matrix. pg 58 herakovich G12 = E1/(2*(1+nu12)) if isotropic''' nu21 = E2*nu12/E1 Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0], [ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0], [0, 0, G12]]) return Q def __T1(self, th): '''Stress Transform for Plane Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def __T2(self, th): '''Strain Transform for Plane Stress th=ply angle in degrees voight notation for strain transform. 
epsilon1 = T2 @ epsilonx''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 # private method def __abdmatrix(self): '''used within the object but not accessible outside''' #========================================================================== # ABD Matrix Compute #========================================================================== # Reduced stiffness matrix for a plane stress ply in principal coordinates # calcluating Q from the Compliance matrix may cause cancE1ation errors A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3)) for i in range(self.nply): # = nply Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1, self.__mat[self.matname[self.matindex[i]]].E2, self.__mat[self.matname[self.matindex[i]]].nu12, self.__mat[self.matname[self.matindex[i]]].G12 ) Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) A += Qbar*(self.z[i+1]-self.z[i]) # coupling stiffness B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2) # bending or flexural laminate stiffness relating moments to curvatures D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3) # laminate stiffness matrix ABD = zeros((6,6)) ABD[0:3,0:3] = A ABD[0:3,3:6] = B ABD[3:6,0:3] = B ABD[3:6,3:6] = D self.ABD = ABD # method def available_materials(self): '''show the materials available in the library''' matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0) print('---available materials---') for k in matprops.columns.tolist(): print(k) print('-------------------------') # private method to be used internally def __import_matprops(self, mymaterial=['T300_5208','AL_7075']): ''' import material properties ''' matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0) if mymaterial==[] or mymaterial=='': print(matprops.columns.tolist()) mat = matprops[mymaterial] #mat.applymap(lambda x:np.float(x)) mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore')) return mat def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup): ''' find the miniumu margin give load conditions ''' # create a 45 carbon cloth panel with a 0.5 inch rohacell core _, FAILUREINDEX_MAXSTRESS_max = laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy], ek=[0,0,0,0,0,0], q0=q0, plyangle= layup, plymatindex=[0,0,0,0], materials = [mymat], platedim=[10,10], zoffset=0, SF=1.0, plots=0, prints=0) return FAILUREINDEX_MAXSTRESS_max def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ): ''' loops through and tries to find a load that is close to 0 and then attempts to find the root (ie margin=0) older version used newton method for root finding scipy.optimize.newton(laminate_min, guess) TODO: Current calculation is stupid using random points to plot. 
fix it by use FI, failure index instead of margin to generate a linear relationship and envelope ''' #laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0) loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0'] laminate_min_list = [] laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup)) envelope_loads = [] N_t = array([0,1]) N_c = array([0,-1]) for loadname,laminate_min in zip(loadnamelist,laminate_min_list): # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] N_crit_t = (1-b) / m # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] N_crit_c = (1-b) / m envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c)) print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat)) for k in envelope_loads: print(k) # plot envelope Nx_env = [] Nxy_env = [] laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup) # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] Nx_env.append( (1-b) / m ) Nxy_env.append( 0 ) # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] Nx_env.append( (1-b) / m ) Nxy_env.append( 0 ) laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup) # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] Nxy_env.append( (1-b) / m ) Nx_env.append( 0 ) # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] Nxy_env.append( (1-b) / m ) Nx_env.append( 0 ) laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup) n = 500 f = 1.25 # < 1 # arr1 = np.random.randint(Nx_env[0]-abs(Nx_env[0]*f),Nx_env[0]+abs(Nx_env[0])*f,n) # arr2 = np.random.randint(Nx_env[1]-abs(Nx_env[1]*f),Nx_env[1]+abs(Nx_env[1])*f,n) # Nx_r = np.concatenate((arr1, arr2)) # # arr1 = np.random.randint(Nxy_env[2]-abs(Nxy_env[2])*f,Nxy_env[2]+abs(Nxy_env[2])*f,n) # arr2 = np.random.randint(Nxy_env[3]-abs(Nxy_env[3])*f,Nxy_env[3]+abs(Nxy_env[3])*f,n) # Nxy_r = np.concatenate((arr1, arr2)) Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n) Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n) for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r): FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri) if FI < 1: Nx_env.append(Nx_ri) Nxy_env.append(Nxy_ri) points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)]) hull = scipy.spatial.ConvexHull(points) plot(points[:,0], points[:,1], 'bo') for simplex in hull.simplices: plot(points[simplex, 0], points[simplex, 1], 'k-') xlabel('Nx, lb/in') ylabel('Nxy, lb/in') title('Failure envelope') return envelope_loads def my_laminate_with_loading(): # loads lbs/in Nx = 50 Ny = 0 Nxy = 0 Mx = 0 My = 0 Mxy = 0 q0 = 0 # pressure # Qx = 0 # Qy = 0 a_width = 50 b_length = 3.14*6.75 ## sandwich 
laminate # plyangle= [45,45,0, 45,45], # plymatindex=[0, 0, 1, 0, 0], # create a 45 carbon cloth panel with a 0.5 inch rohacell core laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy], ek=[0,0,0,0,0,0], q0=q0, plyangle= [0,60,-60,-60,60,0], plymatindex=[0,0,0,0,0,0], materials = ['E-Glass Epoxy Uni'], platedim=[a_width,b_length], zoffset=0, SF=2.0, plots=0, prints=1) if __name__=='__main__': #plot_single_max_failure_loads() #plot_failure_index() my_laminate_with_loading() #material_plots(['E-Glass Epoxy fabric M10E-3783']) #plate() #plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H']) #plot_single_max_failure_loads() # # reload modules # import importlib ; importlib.reload # from composites import laminate # plyangle = [0,45] # matindex = [0,0] # matname = ['graphite-polymer_SI'] # lam1 = laminate(plyangle, matindex, matname) # lam1.ABD
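# --- Added sketch (annotation): minimal ABD assembly with the helpers above ---
# This is not part of the original module; it only illustrates the classical
# lamination theory steps that laminate_calcs() performs, using the Qf/T1/T2
# functions defined earlier. The material values and ply thickness are
# illustrative assumptions, not entries from compositematerials.csv.
def abd_example():
    '''build A, B, D for a symmetric [0/90]s laminate of equal-thickness plies'''
    E1, E2, nu12, G12 = 19.4e6, 1.35e6, 0.3, 0.75e6   # psi, assumed values
    tply = 0.005                                      # in, assumed ply thickness
    plyangles = [0, 90, 90, 0]
    n = len(plyangles)
    z = linspace(-n*tply/2, n*tply/2, n+1)            # ply interface locations
    A = zeros((3, 3)); B = zeros((3, 3)); D = zeros((3, 3))
    Q = Qf(E1, E2, nu12, G12)
    for i, th in enumerate(plyangles):
        Qbar = solve(T1(th), Q) @ T2(th)              # rotate ply stiffness to laminate axes
        A += Qbar * (z[i+1] - z[i])
        B += 0.5 * Qbar * (z[i+1]**2 - z[i]**2)       # ~0 for a symmetric layup
        D += (1/3) * Qbar * (z[i+1]**3 - z[i]**3)
    return A, B, D
# example use:  A, B, D = abd_example()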
mit
gunan/tensorflow
tensorflow/python/keras/engine/data_adapter_test.py
1
43158
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """DataAdapter tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import math from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.data.experimental.ops import cardinality from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.engine import data_adapter from tensorflow.python.keras.utils import data_utils from tensorflow.python.ops import array_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import test from tensorflow.python.util import nest class DummyArrayLike(object): """Dummy array-like object.""" def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] @property def shape(self): return self.data.shape @property def dtype(self): return self.data.dtype def fail_on_convert(x, **kwargs): _ = x _ = kwargs raise TypeError('Cannot convert DummyArrayLike to a tensor') ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert) class DataAdapterTestBase(keras_parameterized.TestCase): def setUp(self): super(DataAdapterTestBase, self).setUp() self.batch_size = 5 self.numpy_input = np.zeros((50, 10)) self.numpy_target = np.ones(50) self.tensor_input = constant_op.constant(2.0, shape=(50, 10)) self.tensor_target = array_ops.ones((50,)) self.arraylike_input = DummyArrayLike(self.numpy_input) self.arraylike_target = DummyArrayLike(self.numpy_target) self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices( (self.numpy_input, self.numpy_target)).shuffle(50).batch( self.batch_size) def generator(): while True: yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size)) self.generator_input = generator() self.iterator_input = data_utils.threadsafe_generator(generator)() self.sequence_input = TestSequence(batch_size=self.batch_size, feature_shape=10) self.model = keras.models.Sequential( [keras.layers.Dense(8, input_shape=(10,), activation='softmax')]) class TestSequence(data_utils.Sequence): def __init__(self, batch_size, feature_shape): self.batch_size = batch_size self.feature_shape = feature_shape def __getitem__(self, item): return (np.zeros((self.batch_size, self.feature_shape)), np.ones((self.batch_size,))) def __len__(self): return 10 class TensorLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(TensorLikeDataAdapterTest, self).setUp() self.adapter_cls = 
data_adapter.TensorLikeDataAdapter def test_can_handle_numpy(self): self.assertTrue(self.adapter_cls.can_handle(self.numpy_input)) self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_batch_size_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5) self.assertEqual(adapter.batch_size(), 5) def test_partial_batch_numpy(self): adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=4) self.assertEqual(adapter.get_size(), 13) # 50/4 self.assertTrue(adapter.has_partial_batch()) self.assertEqual(adapter.partial_batch_size(), 2) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.numpy_input, self.numpy_target, batch_size=5) def test_can_handle_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input))) self.assertTrue( self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0])) self.assertTrue( self.adapter_cls.can_handle( pd.DataFrame(self.numpy_input), pd.DataFrame(self.numpy_input)[0])) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_pandas(self): try: import pandas as pd # pylint: disable=g-import-not-at-top except ImportError: self.skipTest('Skipping test because pandas is not installed.') input_a = keras.Input(shape=(3,), name='input_a') input_b = keras.Input(shape=(3,), name='input_b') input_c = keras.Input(shape=(1,), name='input_b') x = keras.layers.Dense(4, name='dense_1')(input_a) y = keras.layers.Dense(3, name='dense_2')(input_b) z = keras.layers.Dense(1, name='dense_3')(input_c) model_1 = keras.Model(inputs=input_a, outputs=x) model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y]) model_3 = keras.Model(inputs=input_c, outputs=z) model_1.compile(optimizer='rmsprop', loss='mse') model_2.compile(optimizer='rmsprop', loss='mse') input_a_np = np.random.random((10, 3)) input_b_np = np.random.random((10, 3)) input_a_df = pd.DataFrame(input_a_np) input_b_df = pd.DataFrame(input_b_np) output_a_df = pd.DataFrame(np.random.random((10, 4))) output_b_df = pd.DataFrame(np.random.random((10, 3))) model_1.fit(input_a_df, output_a_df) model_2.fit([input_a_df, input_b_df], [output_a_df, output_b_df]) model_1.fit([input_a_df], [output_a_df]) model_1.fit({'input_a': input_a_df}, output_a_df) model_2.fit({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) model_1.evaluate(input_a_df, output_a_df) model_2.evaluate([input_a_df, input_b_df], [output_a_df, 
output_b_df]) model_1.evaluate([input_a_df], [output_a_df]) model_1.evaluate({'input_a': input_a_df}, output_a_df) model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df}, [output_a_df, output_b_df]) # Verify predicting on pandas vs numpy returns the same result predict_1_pandas = model_1.predict(input_a_df) predict_2_pandas = model_2.predict([input_a_df, input_b_df]) predict_3_pandas = model_3.predict(input_a_df[0]) predict_1_numpy = model_1.predict(input_a_np) predict_2_numpy = model_2.predict([input_a_np, input_b_np]) predict_3_numpy = model_3.predict(np.asarray(input_a_df[0])) self.assertAllClose(predict_1_numpy, predict_1_pandas) self.assertAllClose(predict_2_numpy, predict_2_pandas) self.assertAllClose(predict_3_numpy, predict_3_pandas) # Extra ways to pass in dataframes model_1.predict([input_a_df]) model_1.predict({'input_a': input_a_df}) model_2.predict({'input_a': input_a_df, 'input_b': input_b_df}) def test_can_handle(self): self.assertTrue(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input)) self.assertFalse( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.tensor_input, self.tensor_target, batch_size=5) def test_size(self): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = np.arange(num_samples) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. 
epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, and that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch remains contiguous for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.tensor_input, self.tensor_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class GenericArrayLikeDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GenericArrayLikeDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter def test_can_handle_some_numpy(self): self.assertTrue(self.adapter_cls.can_handle( self.arraylike_input)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.arraylike_target)) # Because adapters are mutually exclusive, don't handle cases # where all the data is numpy or an eagertensor self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse( self.adapter_cls.can_handle(self.numpy_input, self.numpy_target)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse( self.adapter_cls.can_handle(self.tensor_input, self.tensor_target)) # But do handle mixes that include generic arraylike data self.assertTrue( self.adapter_cls.can_handle(self.numpy_input, self.arraylike_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.numpy_target)) self.assertTrue( self.adapter_cls.can_handle(self.arraylike_input, self.tensor_target)) self.assertTrue( self.adapter_cls.can_handle(self.tensor_input, self.arraylike_target)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) 
self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) def test_size(self): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=5) self.assertEqual(adapter.get_size(), 10) self.assertFalse(adapter.has_partial_batch()) def test_epochs(self): num_epochs = 3 adapter = self.adapter_cls( self.arraylike_input, self.numpy_target, batch_size=5, epochs=num_epochs) ds_iter = iter(adapter.get_dataset()) num_batches_per_epoch = self.numpy_input.shape[0] // 5 for _ in range(num_batches_per_epoch * num_epochs): next(ds_iter) with self.assertRaises(StopIteration): next(ds_iter) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): # First verify that DummyArrayLike can't be converted to a Tensor with self.assertRaises(TypeError): ops.convert_to_tensor_v2(self.arraylike_input) # Then train on the array like. # It should not be converted to a tensor directly (which would force it into # memory), only the sliced data should be converted. self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.arraylike_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.arraylike_target, batch_size=5) self.model.predict(self.arraylike_input, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_numpy_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.numpy_target, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.numpy_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.numpy_target, batch_size=5) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training_tensor_target(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.arraylike_input, self.tensor_target, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle=True, batch_size=5) self.model.fit(self.arraylike_input, self.tensor_target, shuffle='batch', batch_size=5) self.model.evaluate(self.arraylike_input, self.tensor_target, batch_size=5) def test_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 32 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle=True, epochs=2) def _get_epoch(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return np.concatenate(ds_data) ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_data = _get_epoch(ds_iter) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. 
self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(second_epoch_data)) def test_batch_shuffle_correctness(self): with context.eager_mode(): num_samples = 100 batch_size = 6 x = DummyArrayLike(np.arange(num_samples)) np.random.seed(99) adapter = self.adapter_cls( x, y=None, batch_size=batch_size, shuffle='batch', epochs=2) def _get_epoch_batches(ds_iter): ds_data = [] for _ in range(int(math.ceil(num_samples / batch_size))): ds_data.append(next(ds_iter)[0].numpy()) return ds_data ds_iter = iter(adapter.get_dataset()) # First epoch. epoch_batch_data = _get_epoch_batches(ds_iter) epoch_data = np.concatenate(epoch_batch_data) def _verify_batch(batch): # Verify that a batch contains only contiguous data, but that it has # been shuffled. shuffled_batch = np.sort(batch) self.assertNotAllClose(batch, shuffled_batch) for i in range(1, len(batch)): self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i]) # Assert that the data within each batch is shuffled contiguous data for batch in epoch_batch_data: _verify_batch(batch) # Check that individual batches are unshuffled # Check that shuffling occurred. self.assertNotAllClose(x, epoch_data) # Check that each elements appears, and only once. self.assertAllClose(x, np.sort(epoch_data)) # Second epoch. second_epoch_batch_data = _get_epoch_batches(ds_iter) second_epoch_data = np.concatenate(second_epoch_batch_data) # Assert that the data within each batch remains contiguous for batch in second_epoch_batch_data: _verify_batch(batch) # Check that shuffling occurred. self.assertNotAllClose(x, second_epoch_data) # Check that shuffling is different across epochs. self.assertNotAllClose(epoch_data, second_epoch_data) # Check that each elements appears, and only once. 
self.assertAllClose(x, np.sort(second_epoch_data)) @parameterized.named_parameters( ('batch_size_5', 5, None, 5), ('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence ('steps_1', None, 1, 50), ('steps_4', None, 4, 13), ) def test_batch_size(self, batch_size_in, steps, batch_size_out): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.batch_size(), batch_size_out) @parameterized.named_parameters( ('batch_size_5', 5, None, 10, 0), ('batch_size_4', 4, None, 13, 2), ('steps_1', None, 1, 1, 0), ('steps_5', None, 5, 5, 0), ('steps_4', None, 4, 4, 11), ) def test_partial_batch( self, batch_size_in, steps, size, partial_batch_size): adapter = self.adapter_cls( self.arraylike_input, self.arraylike_target, batch_size=batch_size_in, steps=steps) self.assertEqual(adapter.get_size(), size) # 50/steps self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size)) self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None) class DatasetAdapterTest(DataAdapterTestBase): def setUp(self): super(DatasetAdapterTest, self).setUp() self.adapter_cls = data_adapter.DatasetAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertTrue(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): dataset = self.adapter_cls(self.dataset_input).get_dataset() self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(dataset) def test_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.dataset_input) self.assertIsNone(adapter.batch_size()) def test_partial_batch(self): adapter = self.adapter_cls(self.dataset_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.dataset_input, y=self.dataset_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input) class GeneratorDataAdapterTest(DataAdapterTestBase): def setUp(self): super(GeneratorDataAdapterTest, self).setUp() self.adapter_cls = data_adapter.GeneratorDataAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertTrue(self.adapter_cls.can_handle(self.generator_input)) self.assertFalse(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.generator_input, steps_per_epoch=10) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool 
def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.generator_input) self.assertIsNone(adapter.get_size()) def test_batch_size(self): adapter = self.adapter_cls(self.generator_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.generator_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.generator_input, y=self.generator_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls( self.generator_input, sample_weights=self.generator_input) def test_not_shuffled(self): def generator(): for i in range(10): yield np.ones((1, 1)) * i adapter = self.adapter_cls(generator(), shuffle=True) with context.eager_mode(): for i, data in enumerate(adapter.get_dataset()): self.assertEqual(i, data[0].numpy().flatten()) class KerasSequenceAdapterTest(DataAdapterTestBase): def setUp(self): super(KerasSequenceAdapterTest, self).setUp() self.adapter_cls = data_adapter.KerasSequenceAdapter def test_can_handle(self): self.assertFalse(self.adapter_cls.can_handle(self.numpy_input)) self.assertFalse(self.adapter_cls.can_handle(self.tensor_input)) self.assertFalse(self.adapter_cls.can_handle(self.dataset_input)) self.assertFalse(self.adapter_cls.can_handle(self.generator_input)) self.assertTrue(self.adapter_cls.can_handle(self.sequence_input)) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) def test_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input) @keras_parameterized.run_all_keras_modes(always_skip_v1=True) @test_util.run_v2_only @data_utils.dont_use_multiprocessing_pool def test_with_multiprocessing_training(self): self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', run_eagerly=testing_utils.should_run_eagerly()) self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) # Fit twice to ensure there isn't any duplication that prevent the worker # from starting. 
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True, max_queue_size=10, steps_per_epoch=10) def test_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.get_size(), 10) def test_batch_size(self): adapter = self.adapter_cls(self.sequence_input) self.assertEqual(adapter.batch_size(), None) self.assertEqual(adapter.representative_batch_size(), 5) def test_partial_batch(self): adapter = self.adapter_cls(self.sequence_input) self.assertFalse(adapter.has_partial_batch()) self.assertIsNone(adapter.partial_batch_size()) def test_invalid_targets_argument(self): with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'): self.adapter_cls(self.sequence_input, y=self.sequence_input) def test_invalid_sample_weights_argument(self): with self.assertRaisesRegexp(ValueError, r'`sample_weight` argument is not supported'): self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input) class DataHandlerTest(keras_parameterized.TestCase): def test_finite_dataset_with_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # User can choose to only partially consume `Dataset`. data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertEqual(data_handler.inferred_steps, 2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) def test_finite_dataset_without_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1) data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, 3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_finite_dataset_with_steps_per_epoch_exact_size(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1) # If user specifies exact size of `Dataset` as `steps_per_epoch`, # create a new iterator each epoch. 
data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=4) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) def test_infinite_dataset_with_steps_per_epoch(self): data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat() data_handler = data_adapter.DataHandler( data, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator).numpy()) returned_data.append(epoch_data) self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]]) def test_unknown_cardinality_dataset_with_steps_per_epoch(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN) # User can choose to only partially consume `Dataset`. data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2) self.assertFalse(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1], [2, 3]]) self.assertEqual(data_handler.inferred_steps, 2) def test_unknown_cardinality_dataset_without_steps_per_epoch(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6]) filtered_ds = ds.filter(lambda x: x < 4) self.assertEqual( cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN) data_handler = data_adapter.DataHandler( filtered_ds, initial_epoch=0, epochs=2) self.assertEqual(data_handler.inferred_steps, None) self.assertTrue(data_handler._adapter.should_recreate_iterator()) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] with data_handler.catch_stop_iteration(): for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]]) self.assertEqual(data_handler.inferred_steps, 4) def test_insufficient_data(self): ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1]) ds = ds.filter(lambda *args, **kwargs: True) data_handler = data_adapter.DataHandler( ds, initial_epoch=0, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): with data_handler.catch_stop_iteration(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertTrue(data_handler._insufficient_data) self.assertEqual(returned_data, [[0, 1]]) def test_numpy(self): x = np.array([0, 1, 2]) y = np.array([0, 2, 4]) sw = np.array([0, 4, 8]) data_handler = data_adapter.DataHandler( x=x, y=y, sample_weight=sw, batch_size=1, epochs=2) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) 
self.assertEqual(returned_data, [[(0, 0, 0), (1, 2, 4), (2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]]) def test_generator(self): def generator(): for _ in range(2): for step in range(3): yield (ops.convert_to_tensor_v2([step]),) data_handler = data_adapter.DataHandler( generator(), epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_composite_tensor(self): st = sparse_tensor.SparseTensor( indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1]) data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate( nest.map_structure(sparse_ops.sparse_tensor_to_dense, returned_data)) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_list_of_scalars(self): data_handler = data_adapter.DataHandler([[0], [1], [2]], epochs=2, steps_per_epoch=3) returned_data = [] for _, iterator in data_handler.enumerate_epochs(): epoch_data = [] for _ in data_handler.steps(): epoch_data.append(next(iterator)) returned_data.append(epoch_data) returned_data = self.evaluate(returned_data) self.assertEqual(returned_data, [[([0],), ([1],), ([2],)], [([0],), ([1],), ([2],)]]) def test_class_weight_user_errors(self): with self.assertRaisesRegexp(ValueError, 'to be a dict with keys'): data_adapter.DataHandler( x=[[0], [1], [2]], y=[[2], [1], [0]], batch_size=1, sample_weight=[[1.], [2.], [4.]], class_weight={ 0: 0.5, 1: 1., 3: 1.5 # Skips class `2`. }) with self.assertRaisesRegexp(ValueError, 'with a single output'): data_adapter.DataHandler( x=np.ones((10, 1)), y=[np.ones((10, 1)), np.zeros((10, 1))], batch_size=2, class_weight={ 0: 0.5, 1: 1., 2: 1.5 }) class TestValidationSplit(keras_parameterized.TestCase): @parameterized.named_parameters(('numpy_arrays', True), ('tensors', False)) def test_validation_split_shuffled(self, use_numpy): if use_numpy: x = np.array([0, 1, 2, 3, 4]) y = np.array([0, 2, 4, 6, 8]) sw = np.array([0, 4, 8, 12, 16]) else: x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4]) y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8]) sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16]) (train_x, train_y, train_sw), (val_x, val_y, val_sw) = ( data_adapter.train_validation_split((x, y, sw), validation_split=0.2)) self.assertEqual(int(train_x.shape[0]), 4) self.assertEqual(int(train_y.shape[0]), 4) self.assertEqual(int(train_sw.shape[0]), 4) for i in range(4): # Check that all arrays were shuffled in identical order. self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy()) self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy()) self.assertEqual(int(val_x.shape[0]), 1) self.assertEqual(int(val_y.shape[0]), 1) self.assertEqual(int(val_sw.shape[0]), 1) for i in range(1): # Check that all arrays were shuffled in identical order. self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy()) self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy()) # Check that arrays contain expected values. 
self.assertEqual( sorted(array_ops.concat([train_x, val_x], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(x).numpy().tolist())) self.assertEqual( sorted(array_ops.concat([train_y, val_y], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(y).numpy().tolist())) self.assertEqual( sorted(array_ops.concat([train_sw, val_sw], axis=0).numpy().tolist()), sorted(ops.convert_to_tensor_v2(sw).numpy().tolist())) @parameterized.named_parameters(('numpy_arrays', True), ('tensors', False)) def test_validation_split_unshuffled(self, use_numpy): if use_numpy: x = np.array([0, 1, 2, 3, 4]) y = np.array([0, 2, 4, 6, 8]) sw = np.array([0, 4, 8, 12, 16]) else: x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4]) y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8]) sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16]) (train_x, train_y, train_sw), (val_x, val_y, val_sw) = ( data_adapter.train_validation_split((x, y, sw), validation_split=0.2, shuffle=False)) self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3]) self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6]) self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12]) self.assertEqual(val_x.numpy().tolist(), [4]) self.assertEqual(val_y.numpy().tolist(), [8]) self.assertEqual(val_sw.numpy().tolist(), [16]) def test_validation_split_user_error(self): with self.assertRaisesRegexp(ValueError, 'is only supported for Tensors'): data_adapter.train_validation_split( lambda: np.ones((10, 1)), validation_split=0.2) def test_validation_split_examples_too_few(self): with self.assertRaisesRegexp( ValueError, 'not sufficient to split it'): data_adapter.train_validation_split( np.ones((1, 10)), validation_split=0.2) def test_validation_split_none(self): train_sw, val_sw = data_adapter.train_validation_split( None, validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) (_, train_sw), (_, val_sw) = data_adapter.train_validation_split( (np.ones((10, 1)), None), validation_split=0.2) self.assertIsNone(train_sw) self.assertIsNone(val_sw) class TestUtils(keras_parameterized.TestCase): def test_expand_1d_sparse_tensors_untouched(self): st = sparse_tensor.SparseTensor( indices=[[0], [10]], values=[1, 2], dense_shape=[10]) st = data_adapter.expand_1d(st) self.assertEqual(st.shape.rank, 1) if __name__ == '__main__': ops.enable_eager_execution() test.main()
apache-2.0
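The size and partial-batch assertions in the adapter tests above (50 samples with batch_size=4 giving 13 batches, the last holding 2 samples) follow from simple ceiling arithmetic. A minimal NumPy-only sketch of that bookkeeping, using made-up sample counts and no Keras API:

import math
import numpy as np

num_samples, batch_size = 50, 4                          # mirrors test_partial_batch_numpy
num_batches = int(math.ceil(num_samples / batch_size))   # 13 batches in total
partial = num_samples % batch_size                       # 2 leftover samples; 0 would mean no partial batch

# Slice the sample indices into batches and confirm the counts.
batches = [np.arange(num_samples)[i * batch_size:(i + 1) * batch_size]
           for i in range(num_batches)]
assert len(batches) == 13 and len(batches[-1]) == 2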
swharden/SWHLab
doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py
1
1812
""" MOST OF THIS CODE IS NOT USED ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE """ import os import sys # in case our module isn't installed (running from this folder) if not os.path.abspath('../../../') in sys.path: sys.path.append('../../../') # helps spyder get docs import swhlab import swhlab.common as cm import matplotlib.pyplot as plt import numpy as np import warnings # suppress VisibleDeprecationWarning warning warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) def analyzeSweep(abf,plotToo=True,color=None,label=None): Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:] AV,SD=np.average(Y),np.std(Y) dev=5 # number of stdevs from the avg to set the range R1,R2=[(AV-SD)*dev,(AV+SD)*dev] nBins=1000 hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True) histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5)) if plotToo: plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10) plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label) return if __name__=="__main__": #abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf" abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf" abf=swhlab.ABF(abfFile) # prepare figure plt.figure(figsize=(10,10)) plt.grid() plt.title("smart baseline value distribution") plt.xlabel(abf.units2) plt.ylabel("normalized density") # do the analysis abf.kernel=abf.kernel_gaussian(sizeMS=500) abf.setsweep(175) analyzeSweep(abf,color='b',label="baseline") abf.setsweep(200) analyzeSweep(abf,color='g',label="TGOT") abf.setsweep(375) analyzeSweep(abf,color='y',label="washout") # show figure plt.legend() plt.margins(0,.1) plt.show() print("DONE")
mit
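The script above depends on swhlab's ABF reader and kernel helpers. The core idea (a normalized histogram of a baseline-subtracted sweep, smoothed by convolving with a Gaussian kernel) can be sketched with NumPy alone; the synthetic data and the kernel_gaussian helper below are stand-ins, not the swhlab implementations:

import numpy as np

def kernel_gaussian(size, sigma=None):
    """Unit-area Gaussian kernel; a stand-in for swhlab.common.kernel_gaussian."""
    size = int(size)
    sigma = sigma if sigma else size / 10.0
    xs = np.arange(size) - size / 2.0
    kernel = np.exp(-(xs ** 2) / (2 * sigma ** 2))
    return kernel / kernel.sum()

rng = np.random.RandomState(0)
Y = rng.normal(loc=0.0, scale=2.0, size=20000)          # fake baseline-subtracted sweep

AV, SD = np.average(Y), np.std(Y)
dev = 5                                                 # stdevs on either side of the average
hist, bins = np.histogram(Y, bins=1000, range=(AV - SD * dev, AV + SD * dev), density=True)
histSmooth = np.convolve(hist, kernel_gaussian(1000 / 5), mode='same')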
zuku1985/scikit-learn
sklearn/preprocessing/tests/test_imputation.py
51
12300
import numpy as np from scipy import sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_false from sklearn.preprocessing.imputation import Imputer from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn import tree from sklearn.random_projection import sparse_random_matrix def _check_statistics(X, X_true, strategy, statistics, missing_values): """Utility function for testing imputation for a given strategy. Test: - along the two axes - with dense and sparse arrays Check that: - the statistics (mean, median, mode) are correct - the missing values are imputed correctly""" err_msg = "Parameters: strategy = %s, missing_values = %s, " \ "axis = {0}, sparse = {1}" % (strategy, missing_values) # Normal matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) X_trans = imputer.fit(X).transform(X.copy()) assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, False)) assert_array_equal(X_trans, X_true, err_msg.format(0, False)) # Normal matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(X.transpose()) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, X.copy().transpose()) else: X_trans = imputer.transform(X.copy().transpose()) assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, False)) # Sparse matrix, axis = 0 imputer = Imputer(missing_values, strategy=strategy, axis=0) imputer.fit(sparse.csc_matrix(X)) X_trans = imputer.transform(sparse.csc_matrix(X.copy())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(imputer.statistics_, statistics, err_msg.format(0, True)) assert_array_equal(X_trans, X_true, err_msg.format(0, True)) # Sparse matrix, axis = 1 imputer = Imputer(missing_values, strategy=strategy, axis=1) imputer.fit(sparse.csc_matrix(X.transpose())) if np.isnan(statistics).any(): assert_raises(ValueError, imputer.transform, sparse.csc_matrix(X.copy().transpose())) else: X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose())) if sparse.issparse(X_trans): X_trans = X_trans.toarray() assert_array_equal(X_trans, X_true.transpose(), err_msg.format(1, True)) def test_imputation_shape(): # Verify the shapes of the imputed matrix for different strategies. X = np.random.randn(10, 2) X[::2] = np.nan for strategy in ['mean', 'median', 'most_frequent']: imputer = Imputer(strategy=strategy) X_imputed = imputer.fit_transform(X) assert_equal(X_imputed.shape, (10, 2)) X_imputed = imputer.fit_transform(sparse.csr_matrix(X)) assert_equal(X_imputed.shape, (10, 2)) def test_imputation_mean_median_only_zero(): # Test imputation using the mean and median strategies, when # missing_values == 0. X = np.array([ [np.nan, 0, 0, 0, 5], [np.nan, 1, 0, np.nan, 3], [np.nan, 2, 0, 0, 0], [np.nan, 6, 0, 5, 13], ]) X_imputed_mean = np.array([ [3, 5], [1, 3], [2, 7], [6, 13], ]) statistics_mean = [np.nan, 3, np.nan, np.nan, 7] # Behaviour of median with NaN is undefined, e.g. 
different results in # np.median and np.ma.median X_for_median = X[:, [0, 1, 2, 4]] X_imputed_median = np.array([ [2, 5], [1, 3], [2, 5], [6, 13], ]) statistics_median = [np.nan, 2, np.nan, 5] _check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0) _check_statistics(X_for_median, X_imputed_median, "median", statistics_median, 0) def safe_median(arr, *args, **kwargs): # np.median([]) raises a TypeError for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.median(arr, *args, **kwargs) def safe_mean(arr, *args, **kwargs): # np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1 length = arr.size if hasattr(arr, 'size') else len(arr) return np.nan if length == 0 else np.mean(arr, *args, **kwargs) def test_imputation_mean_median(): # Test imputation using the mean and median strategies, when # missing_values != 0. rng = np.random.RandomState(0) dim = 10 dec = 10 shape = (dim * dim, dim + dec) zeros = np.zeros(shape[0]) values = np.arange(1, shape[0] + 1) values[4::2] = - values[4::2] tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))), ("mean", 0, lambda z, v, p: np.mean(v)), ("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))), ("median", 0, lambda z, v, p: np.median(v))] for strategy, test_missing_values, true_value_fun in tests: X = np.empty(shape) X_true = np.empty(shape) true_statistics = np.empty(shape[1]) # Create a matrix X with columns # - with only zeros, # - with only missing values # - with zeros, missing values and values # And a matrix X_true containing all true values for j in range(shape[1]): nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1) nb_missing_values = max(shape[0] + dec * dec - (j + dec) * (j + dec), 0) nb_values = shape[0] - nb_zeros - nb_missing_values z = zeros[:nb_zeros] p = np.repeat(test_missing_values, nb_missing_values) v = values[rng.permutation(len(values))[:nb_values]] true_statistics[j] = true_value_fun(z, v, p) # Create the columns X[:, j] = np.hstack((v, z, p)) if 0 == test_missing_values: X_true[:, j] = np.hstack((v, np.repeat( true_statistics[j], nb_missing_values + nb_zeros))) else: X_true[:, j] = np.hstack((v, z, np.repeat(true_statistics[j], nb_missing_values))) # Shuffle them the same way np.random.RandomState(j).shuffle(X[:, j]) np.random.RandomState(j).shuffle(X_true[:, j]) # Mean doesn't support columns containing NaNs, median does if strategy == "median": cols_to_keep = ~np.isnan(X_true).any(axis=0) else: cols_to_keep = ~np.isnan(X_true).all(axis=0) X_true = X_true[:, cols_to_keep] _check_statistics(X, X_true, strategy, true_statistics, test_missing_values) def test_imputation_median_special_cases(): # Test median imputation with sparse boundary cases X = np.array([ [0, np.nan, np.nan], # odd: implicit zero [5, np.nan, np.nan], # odd: explicit nonzero [0, 0, np.nan], # even: average two zeros [-5, 0, np.nan], # even: avg zero and neg [0, 5, np.nan], # even: avg zero and pos [4, 5, np.nan], # even: avg nonzeros [-4, -5, np.nan], # even: avg negatives [-1, 2, np.nan], # even: crossing neg and pos ]).transpose() X_imputed_median = np.array([ [0, 0, 0], [5, 5, 5], [0, 0, 0], [-5, 0, -2.5], [0, 5, 2.5], [4, 5, 4.5], [-4, -5, -4.5], [-1, 2, .5], ]).transpose() statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5] _check_statistics(X, X_imputed_median, "median", statistics_median, 'NaN') def test_imputation_most_frequent(): # Test imputation using the most-frequent strategy. 
X = np.array([ [-1, -1, 0, 5], [-1, 2, -1, 3], [-1, 1, 3, -1], [-1, 2, 3, 7], ]) X_true = np.array([ [2, 0, 5], [2, 3, 3], [1, 3, 3], [2, 3, 7], ]) # scipy.stats.mode, used in Imputer, doesn't return the first most # frequent as promised in the doc but the lowest most frequent. When this # test will fail after an update of scipy, Imputer will need to be updated # to be consistent with the new (correct) behaviour _check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1) def test_imputation_pipeline_grid_search(): # Test imputation within a pipeline + gridsearch. pipeline = Pipeline([('imputer', Imputer(missing_values=0)), ('tree', tree.DecisionTreeRegressor(random_state=0))]) parameters = { 'imputer__strategy': ["mean", "median", "most_frequent"], 'imputer__axis': [0, 1] } l = 100 X = sparse_random_matrix(l, l, density=0.10) Y = sparse_random_matrix(l, 1, density=0.10).toarray() gs = GridSearchCV(pipeline, parameters) gs.fit(X, Y) def test_imputation_pickle(): # Test for pickling imputers. import pickle l = 100 X = sparse_random_matrix(l, l, density=0.10) for strategy in ["mean", "median", "most_frequent"]: imputer = Imputer(missing_values=0, strategy=strategy) imputer.fit(X) imputer_pickled = pickle.loads(pickle.dumps(imputer)) assert_array_equal(imputer.transform(X.copy()), imputer_pickled.transform(X.copy()), "Fail to transform the data after pickling " "(strategy = %s)" % (strategy)) def test_imputation_copy(): # Test imputation with copy X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0) # copy=True, dense => copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_false(np.all(X == Xt)) # copy=True, sparse csr => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, dense => no copy X = X_orig.copy().toarray() imputer = Imputer(missing_values=0, strategy="mean", copy=False) Xt = imputer.fit(X).transform(X) Xt[0, 0] = -1 assert_array_equal(X, Xt) # copy=False, sparse csr, axis=1 => no copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_equal(X.data, Xt.data) # copy=False, sparse csc, axis=0 => no copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_array_equal(X.data, Xt.data) # copy=False, sparse csr, axis=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=0) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csc, axis=1 => copy X = X_orig.copy().tocsc() imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) Xt.data[0] = -1 assert_false(np.all(X.data == Xt.data)) # copy=False, sparse csr, axis=1, missing_values=0 => copy X = X_orig.copy() imputer = Imputer(missing_values=0, strategy="mean", copy=False, axis=1) Xt = imputer.fit(X).transform(X) assert_false(sparse.issparse(Xt)) # Note: If X is sparse and if missing_values=0, then a (dense) copy of X is # made, even if copy=False.
bsd-3-clause
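The 'mean' strategy with axis=0 exercised by these tests reduces to replacing each missing entry with its column mean. A NumPy-only sketch of that behaviour on made-up data, without the (since-deprecated) Imputer class:

import numpy as np

X = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan],
              [7.0, 8.0, 9.0]])

statistics = np.nanmean(X, axis=0)           # per-column means ignoring NaN: [4., 6.5, 6.]
X_imputed = np.where(np.isnan(X), statistics, X)   # broadcast the column statistic into the gaps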
aflaxman/scikit-learn
sklearn/decomposition/tests/test_truncated_svd.py
66
8261
"""Test truncated SVD transformer.""" import numpy as np import scipy.sparse as sp from sklearn.decomposition import TruncatedSVD from sklearn.utils import check_random_state from sklearn.utils.testing import (assert_array_almost_equal, assert_equal, assert_raises, assert_greater, assert_array_less) # Make an X that looks somewhat like a small tf-idf matrix. # XXX newer versions of SciPy have scipy.sparse.rand for this. shape = 60, 55 n_samples, n_features = shape rng = check_random_state(42) X = rng.randint(-100, 20, np.product(shape)).reshape(shape) X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64) X.data[:] = 1 + np.log(X.data) Xdense = X.A def test_algorithms(): svd_a = TruncatedSVD(30, algorithm="arpack") svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42) Xa = svd_a.fit_transform(X)[:, :6] Xr = svd_r.fit_transform(X)[:, :6] assert_array_almost_equal(Xa, Xr, decimal=5) comp_a = np.abs(svd_a.components_) comp_r = np.abs(svd_r.components_) # All elements are equal, but some elements are more equal than others. assert_array_almost_equal(comp_a[:9], comp_r[:9]) assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2) def test_attributes(): for n_components in (10, 25, 41): tsvd = TruncatedSVD(n_components).fit(X) assert_equal(tsvd.n_components, n_components) assert_equal(tsvd.components_.shape, (n_components, n_features)) def test_too_many_components(): for algorithm in ["arpack", "randomized"]: for n_components in (n_features, n_features + 1): tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm) assert_raises(ValueError, tsvd.fit, X) def test_sparse_formats(): for fmt in ("array", "csr", "csc", "coo", "lil"): Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)() tsvd = TruncatedSVD(n_components=11) Xtrans = tsvd.fit_transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) Xtrans = tsvd.transform(Xfmt) assert_equal(Xtrans.shape, (n_samples, 11)) def test_inverse_transform(): for algo in ("arpack", "randomized"): # We need a lot of components for the reconstruction to be "almost # equal" in all positions. XXX Test means or sums instead? 
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo) Xt = tsvd.fit_transform(X) Xinv = tsvd.inverse_transform(Xt) assert_array_almost_equal(Xinv, Xdense, decimal=1) def test_integers(): Xint = X.astype(np.int64) tsvd = TruncatedSVD(n_components=6) Xtrans = tsvd.fit_transform(Xint) assert_equal(Xtrans.shape, (n_samples, tsvd.n_components)) def test_explained_variance(): # Test sparse data svd_a_10_sp = TruncatedSVD(10, algorithm="arpack") svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_sp = TruncatedSVD(20, algorithm="arpack") svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_sp = svd_a_10_sp.fit_transform(X) X_trans_r_10_sp = svd_r_10_sp.fit_transform(X) X_trans_a_20_sp = svd_a_20_sp.fit_transform(X) X_trans_r_20_sp = svd_r_20_sp.fit_transform(X) # Test dense data svd_a_10_de = TruncatedSVD(10, algorithm="arpack") svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42) svd_a_20_de = TruncatedSVD(20, algorithm="arpack") svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42) X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray()) X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray()) X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray()) X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray()) # helper arrays for tests below svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de, svd_r_10_de, svd_a_20_de, svd_r_20_de) svds_trans = ( (svd_a_10_sp, X_trans_a_10_sp), (svd_r_10_sp, X_trans_r_10_sp), (svd_a_20_sp, X_trans_a_20_sp), (svd_r_20_sp, X_trans_r_20_sp), (svd_a_10_de, X_trans_a_10_de), (svd_r_10_de, X_trans_r_10_de), (svd_a_20_de, X_trans_a_20_de), (svd_r_20_de, X_trans_r_20_de), ) svds_10_v_20 = ( (svd_a_10_sp, svd_a_20_sp), (svd_r_10_sp, svd_r_20_sp), (svd_a_10_de, svd_a_20_de), (svd_r_10_de, svd_r_20_de), ) svds_sparse_v_dense = ( (svd_a_10_sp, svd_a_10_de), (svd_a_20_sp, svd_a_20_de), (svd_r_10_sp, svd_r_10_de), (svd_r_20_sp, svd_r_20_de), ) # Assert the 1st component is equal for svd_10, svd_20 in svds_10_v_20: assert_array_almost_equal( svd_10.explained_variance_ratio_, svd_20.explained_variance_ratio_[:10], decimal=5, ) # Assert that 20 components has higher explained variance than 10 for svd_10, svd_20 in svds_10_v_20: assert_greater( svd_20.explained_variance_ratio_.sum(), svd_10.explained_variance_ratio_.sum(), ) # Assert that all the values are greater than 0 for svd in svds: assert_array_less(0.0, svd.explained_variance_ratio_) # Assert that total explained variance is less than 1 for svd in svds: assert_array_less(svd.explained_variance_ratio_.sum(), 1.0) # Compare sparse vs. 
dense for svd_sparse, svd_dense in svds_sparse_v_dense: assert_array_almost_equal(svd_sparse.explained_variance_ratio_, svd_dense.explained_variance_ratio_) # Test that explained_variance is correct for svd, transformed in svds_trans: total_variance = np.var(X.toarray(), axis=0).sum() variances = np.var(transformed, axis=0) true_explained_variance_ratio = variances / total_variance assert_array_almost_equal( svd.explained_variance_ratio_, true_explained_variance_ratio, ) def test_singular_values(): # Check that the TruncatedSVD output has the correct singular values rng = np.random.RandomState(0) n_samples = 100 n_features = 80 X = rng.randn(n_samples, n_features) apca = TruncatedSVD(n_components=2, algorithm='arpack', random_state=rng).fit(X) rpca = TruncatedSVD(n_components=2, algorithm='arpack', random_state=rng).fit(X) assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12) # Compare to the Frobenius norm X_apca = apca.transform(X) X_rpca = rpca.transform(X) assert_array_almost_equal(np.sum(apca.singular_values_**2.0), np.linalg.norm(X_apca, "fro")**2.0, 12) assert_array_almost_equal(np.sum(rpca.singular_values_**2.0), np.linalg.norm(X_rpca, "fro")**2.0, 12) # Compare to the 2-norms of the score vectors assert_array_almost_equal(apca.singular_values_, np.sqrt(np.sum(X_apca**2.0, axis=0)), 12) assert_array_almost_equal(rpca.singular_values_, np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12) # Set the singular values and see what we get back rng = np.random.RandomState(0) n_samples = 100 n_features = 110 X = rng.randn(n_samples, n_features) apca = TruncatedSVD(n_components=3, algorithm='arpack', random_state=rng) rpca = TruncatedSVD(n_components=3, algorithm='randomized', random_state=rng) X_apca = apca.fit_transform(X) X_rpca = rpca.fit_transform(X) X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0)) X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0)) X_apca[:, 0] *= 3.142 X_apca[:, 1] *= 2.718 X_rpca[:, 0] *= 3.142 X_rpca[:, 1] *= 2.718 X_hat_apca = np.dot(X_apca, apca.components_) X_hat_rpca = np.dot(X_rpca, rpca.components_) apca.fit(X_hat_apca) rpca.fit(X_hat_rpca) assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14) assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
bsd-3-clause
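Two identities that test_singular_values leans on hold for any matrix and can be checked with plain NumPy, independent of TruncatedSVD: the score matrix U·S has column norms equal to the singular values, and the squared singular values sum to the squared Frobenius norm. A small sketch on random data:

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 80)

U, s, Vt = np.linalg.svd(X, full_matrices=False)
scores = U * s                                   # analogous to projecting X onto all right singular vectors

# Column 2-norms of the score matrix are the singular values themselves.
assert np.allclose(np.sqrt((scores ** 2).sum(axis=0)), s)
# Squared singular values sum to the squared Frobenius norm of X.
assert np.allclose((s ** 2).sum(), np.linalg.norm(X, 'fro') ** 2)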
licco/zipline
zipline/history/history_container.py
1
18509
# # Copyright 2014 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from itertools import groupby import numpy as np import pandas as pd from six import itervalues, iteritems, iterkeys from . history import ( index_at_dt, ) from zipline.utils.data import RollingPanel # The closing price is referred to by multiple names, # allow both for price rollover logic etc. CLOSING_PRICE_FIELDS = frozenset({'price', 'close_price'}) def ffill_buffer_from_prior_values(field, buffer_frame, digest_frame, pre_digest_values): """ Forward-fill a buffer frame, falling back to the end-of-period values of a digest frame if the buffer frame has leading NaNs. """ # Get values which are NaN at the beginning of the period. first_bar = buffer_frame.iloc[0] def iter_nan_sids(): """ Helper for iterating over the remaining nan sids in first_bar. """ return (sid for sid in first_bar[first_bar.isnull()].index) # Try to fill with the last entry from the digest frame. if digest_frame is not None: # We don't store a digest frame for frequencies that only have a bar # count of 1. for sid in iter_nan_sids(): buffer_frame[sid][0] = digest_frame.ix[-1, sid] # If we still have nan sids, try to fill with pre_digest_values. for sid in iter_nan_sids(): prior_sid_value = pre_digest_values[field].get(sid) if prior_sid_value: # If the prior value is greater than the timestamp of our first # bar. if prior_sid_value.get('dt', first_bar.name) > first_bar.name: buffer_frame[sid][0] = prior_sid_value.get('value', np.nan) return buffer_frame.ffill() def ffill_digest_frame_from_prior_values(field, digest_frame, prior_values): """ Forward-fill a digest frame, falling back to the last known priof values if necessary. """ if digest_frame is not None: # Digest frame is None in the case that we only have length 1 history # specs for a given frequency. # It's possible that the first bar in our digest frame is storing NaN # values. If so, check if we've tracked an older value and use that as # an ffill value for the first bar. first_bar = digest_frame.ix[0] nan_sids = first_bar[first_bar.isnull()].index for sid in nan_sids: try: # Only use prior value if it is before the index, # so that a backfill does not accidentally occur. if prior_values[field][sid]['dt'] <= digest_frame.index[0]: digest_frame[sid][0] = prior_values[field][sid]['value'] except KeyError: # Allow case where there is no previous value. # e.g. with leading nans. pass digest_frame = digest_frame.ffill() return digest_frame def freq_str_and_bar_count(history_spec): """ Helper for getting the frequency string and bar count from a history spec. """ return (history_spec.frequency.freq_str, history_spec.bar_count) def group_by_frequency(history_specs): """ Takes an iterable of history specs and returns a dictionary mapping unique frequencies to a list of specs with that frequency. Within each list, the HistorySpecs are sorted by ascending bar count. 
Example: [HistorySpec(3, '1d', 'price', True), HistorySpec(2, '2d', 'open', True), HistorySpec(2, '1d', 'open', False), HistorySpec(5, '1m', 'open', True)] yields {Frequency('1d') : [HistorySpec(2, '1d', 'open', False)], HistorySpec(3, '1d', 'price', True), Frequency('2d') : [HistorySpec(2, '2d', 'open', True)], Frequency('1m') : [HistorySpec(5, '1m', 'open', True)]} """ return {key: list(group) for key, group in groupby( sorted(history_specs, key=freq_str_and_bar_count), key=lambda spec: spec.frequency)} class HistoryContainer(object): """ Container for all history panels and frames used by an algoscript. To be used internally by TradingAlgorithm, but *not* passed directly to the algorithm. Entry point for the algoscript is the result of `get_history`. """ def __init__(self, history_specs, initial_sids, initial_dt): # History specs to be served by this container. self.history_specs = history_specs self.frequency_groups = \ group_by_frequency(itervalues(self.history_specs)) # The set of fields specified by all history specs self.fields = set(spec.field for spec in itervalues(history_specs)) # This panel contains raw minutes for periods that haven't been fully # completed. When a frequency period rolls over, these minutes are # digested using some sort of aggregation call on the panel (e.g. `sum` # for volume, `max` for high, `min` for low, etc.). self.buffer_panel = self.create_buffer_panel( initial_sids, initial_dt, ) # Dictionaries with Frequency objects as keys. self.digest_panels, self.cur_window_starts, self.cur_window_closes = \ self.create_digest_panels(initial_sids, initial_dt) # Populating initial frames here, so that the cost of creating the # initial frames does not show up when profiling. These frames are # cached since mid-stream creation of containing data frames on every # bar is expensive. self.create_return_frames(initial_dt) # Helps prop up the prior day panel against having a nan, when the data # has been seen. self.last_known_prior_values = {field: {} for field in self.fields} @property def unique_frequencies(self): """ Return an iterator over all the unique frequencies serviced by this container. """ return iterkeys(self.frequency_groups) def create_digest_panels(self, initial_sids, initial_dt): """ Initialize a RollingPanel for each unique panel frequency being stored by this container. Each RollingPanel pre-allocates enough storage space to service the highest bar-count of any history call that it serves. Relies on the fact that group_by_frequency sorts the value lists by ascending bar count. """ # Map from frequency -> first/last minute of the next digest to be # rolled for that frequency. first_window_starts = {} first_window_closes = {} # Map from frequency -> digest_panels. panels = {} for freq, specs in iteritems(self.frequency_groups): # Relying on the sorting of group_by_frequency to get the spec # requiring the largest number of bars. largest_spec = specs[-1] if largest_spec.bar_count == 1: # No need to allocate a digest panel; this frequency will only # ever use data drawn from self.buffer_panel. first_window_starts[freq] = freq.window_open(initial_dt) first_window_closes[freq] = freq.window_close( first_window_starts[freq] ) continue initial_dates = index_at_dt(largest_spec, initial_dt) # Set up dates for our first digest roll, which is keyed to the # close of the first entry in our initial index. 
first_window_closes[freq] = initial_dates[0] first_window_starts[freq] = freq.window_open(initial_dates[0]) rp = RollingPanel(len(initial_dates) - 1, self.fields, initial_sids) panels[freq] = rp return panels, first_window_starts, first_window_closes def create_buffer_panel(self, initial_sids, initial_dt): """ Initialize a RollingPanel containing enough minutes to service all our frequencies. """ max_bars_needed = max(freq.max_minutes for freq in self.unique_frequencies) rp = RollingPanel( max_bars_needed, self.fields, initial_sids, # Restrict the initial data down to just the fields being used in # this container. ) return rp def convert_columns(self, values): """ If columns have a specific type you want to enforce, overwrite this method and return the transformed values. """ return values def create_return_frames(self, algo_dt): """ Populates the return frame cache. Called during init and at universe rollovers. """ self.return_frames = {} for spec_key, history_spec in iteritems(self.history_specs): index = pd.to_datetime(index_at_dt(history_spec, algo_dt)) frame = pd.DataFrame( index=index, columns=self.convert_columns( self.buffer_panel.minor_axis.values), dtype=np.float64) self.return_frames[spec_key] = frame def buffer_panel_minutes(self, buffer_panel=None, earliest_minute=None, latest_minute=None): """ Get the minutes in @buffer_panel between @earliest_minute and @latest_minute, inclusive. @buffer_panel can be a RollingPanel or a plain Panel. If a RollingPanel is supplied, we call `get_current` to extract a Panel object. If no panel is supplied, we use self.buffer_panel. If no value is specified for @earliest_minute, use all minutes from the start of the panel up to @latest_minute. If no value is specified for @latest_minute, use all minutes from @earliest_minute through the most recent minute in the panel. """ buffer_panel = buffer_panel or self.buffer_panel if isinstance(buffer_panel, RollingPanel): buffer_panel = buffer_panel.get_current() return buffer_panel.ix[:, earliest_minute:latest_minute, :] def update(self, data, algo_dt): """ Takes @data, the bar for @algo_dt, checks to see if we need to roll any new digests, then adds the new data to the buffer panel. """ self.update_digest_panels(algo_dt, self.buffer_panel) fields = self.fields frame = pd.DataFrame( {sid: {field: bar[field] for field in fields} for sid, bar in data.iteritems() if (bar and bar['dt'] == algo_dt and # Only use data which is keyed in the data panel. # Prevents crashes due to custom data. sid in self.buffer_panel.minor_axis)}) self.buffer_panel.add_frame(algo_dt, frame) def update_digest_panels(self, algo_dt, buffer_panel, freq_filter=None): """ Check whether @algo_dt is greater than cur_window_close for any of our frequencies. If so, roll a digest for that frequency using data drawn from @buffer_panel and insert it into the appropriate digest panels. If @freq_filter is specified, only use the given data to update frequencies on which the filter returns True. """ for frequency in self.unique_frequencies: if freq_filter is not None and not freq_filter(frequency): continue # We don't keep a digest panel if we only have a length-1 history # spec for a given frequency. digest_panel = self.digest_panels.get(frequency, None) while algo_dt > self.cur_window_closes[frequency]: earliest_minute = self.cur_window_starts[frequency] latest_minute = self.cur_window_closes[frequency] minutes_to_process = self.buffer_panel_minutes( buffer_panel, earliest_minute=earliest_minute, latest_minute=latest_minute, ) # Create a digest from minutes_to_process and add it to # digest_panel. 
self.roll(frequency, digest_panel, minutes_to_process, latest_minute) # Update panel start/close for this frequency. self.cur_window_starts[frequency] = \ frequency.next_window_start(latest_minute) self.cur_window_closes[frequency] = \ frequency.window_close(self.cur_window_starts[frequency]) def roll(self, frequency, digest_panel, buffer_minutes, digest_dt): """ Package up the minutes in @buffer_minutes and insert the resulting bar into @digest_panel at index @digest_dt. The caller (update_digest_panels) is responsible for updating self.cur_window_{starts|closes} for the given frequency. """ if digest_panel is None: # This happens if the only spec we have at this frequency has a bar # count of 1. return rolled = pd.DataFrame( index=self.fields, columns=buffer_minutes.minor_axis) for field in self.fields: if field in CLOSING_PRICE_FIELDS: # Use the last close, or NaN if we have no minutes. try: prices = buffer_minutes.loc[field].ffill().iloc[-1] except IndexError: # Scalar assignment sets the value for all entries. prices = np.nan rolled.ix[field] = prices elif field == 'open_price': # Use the first open, or NaN if we have no minutes. try: opens = buffer_minutes.loc[field].bfill().iloc[0] except IndexError: # Scalar assignment sets the value for all entries. opens = np.nan rolled.ix['open_price'] = opens elif field == 'volume': # Volume is the sum of the volumes during the # course of the period. volumes = buffer_minutes.ix['volume'].sum().fillna(0) rolled.ix['volume'] = volumes elif field == 'high': # Use the highest high. highs = buffer_minutes.ix['high'].max() rolled.ix['high'] = highs elif field == 'low': # Use the lowest low. lows = buffer_minutes.ix['low'].min() rolled.ix['low'] = lows for sid, value in rolled.ix[field].iterkv(): if not np.isnan(value): try: prior_values = \ self.last_known_prior_values[field][sid] except KeyError: prior_values = {} self.last_known_prior_values[field][sid] = \ prior_values prior_values['dt'] = digest_dt prior_values['value'] = value digest_panel.add_frame(digest_dt, rolled) def get_history(self, history_spec, algo_dt): """ The algoscript's main history API maps to this function. Selects from the overarching history panel the values for @history_spec at the given @algo_dt. """ field = history_spec.field bar_count = history_spec.bar_count do_ffill = history_spec.ffill index = pd.to_datetime(index_at_dt(history_spec, algo_dt)) return_frame = self.return_frames[history_spec.key_str] # Overwrite the index. # Not worrying about values here since the values are overwritten # in the next step. return_frame.index = index if bar_count > 1: # Get the last bar_count - 1 frames from our stored historical # frames. digest_panel = self.digest_panels[history_spec.frequency]\ .get_current() digest_frame = digest_panel[field].copy().ix[1 - bar_count:] else: digest_frame = None # Get minutes from our buffer panel to build the last row. 
buffer_frame = self.buffer_panel_minutes( earliest_minute=self.cur_window_starts[history_spec.frequency], )[field] if do_ffill: digest_frame = ffill_digest_frame_from_prior_values( field, digest_frame, self.last_known_prior_values, ) buffer_frame = ffill_buffer_from_prior_values( field, buffer_frame, digest_frame, self.last_known_prior_values, ) if digest_frame is not None: return_frame.ix[:-1] = digest_frame.ix[:] if field == 'volume': return_frame.ix[algo_dt] = buffer_frame.fillna(0).sum() elif field == 'high': return_frame.ix[algo_dt] = buffer_frame.max() elif field == 'low': return_frame.ix[algo_dt] = buffer_frame.min() elif field == 'open_price': return_frame.ix[algo_dt] = buffer_frame.iloc[0] else: return_frame.ix[algo_dt] = buffer_frame.loc[algo_dt] # Returning a copy of the DataFrame so that we don't crash if the user # adds columns to the frame. Ideally we would just drop any added # columns, but pandas 0.12.0 doesn't support in-place dropping of # columns. We should re-evaluate this implementation once we're on a # more up-to-date pandas. return return_frame.copy()
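# --- Illustrative sketch (not part of the file above) ------------------------
# The roll()/update_digest_panels() logic above collapses a window of minute
# bars into a single "digest" bar: last close, first open, summed volume,
# highest high, lowest low.  The snippet below is a minimal, self-contained
# sketch of that aggregation rule using a plain pandas DataFrame, so the rule
# can be seen without the RollingPanel machinery.  The helper name
# `digest_bar` and the sample data are hypothetical and are not part of
# zipline's API.
import numpy as np
import pandas as pd


def digest_bar(minutes):
    """Collapse a DataFrame of minute bars (one row per minute) into a single
    OHLCV-style bar, mirroring the per-field rules in roll()."""
    return pd.Series({
        'open_price': minutes['open_price'].bfill().iloc[0],      # first open
        'close_price': minutes['close_price'].ffill().iloc[-1],   # last close
        'high': minutes['high'].max(),                            # highest high
        'low': minutes['low'].min(),                              # lowest low
        'volume': minutes['volume'].fillna(0).sum(),              # total volume
    })


if __name__ == '__main__':
    idx = pd.date_range('2014-01-02 09:31', periods=5, freq='min')
    sample_minutes = pd.DataFrame({
        'open_price': [10.0, np.nan, 10.2, 10.1, 10.3],
        'close_price': [10.1, 10.2, np.nan, 10.3, 10.25],
        'high': [10.2, 10.3, 10.25, 10.35, 10.4],
        'low': [9.9, 10.0, 10.1, 10.05, 10.2],
        'volume': [100, 150, np.nan, 120, 80],
    }, index=idx)
    print(digest_bar(sample_minutes))
# -----------------------------------------------------------------------------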
apache-2.0
Jim61C/VTT_Show_Atten_And_Tell
prepro.py
4
8670
from scipy import ndimage from collections import Counter from core.vggnet import Vgg19 from core.utils import * import tensorflow as tf import numpy as np import pandas as pd import hickle import os import json def _process_caption_data(caption_file, image_dir, max_length): with open(caption_file) as f: caption_data = json.load(f) # id_to_filename is a dictionary such as {image_id: filename]} id_to_filename = {image['id']: image['file_name'] for image in caption_data['images']} # data is a list of dictionary which contains 'captions', 'file_name' and 'image_id' as key. data = [] for annotation in caption_data['annotations']: image_id = annotation['image_id'] annotation['file_name'] = os.path.join(image_dir, id_to_filename[image_id]) data += [annotation] # convert to pandas dataframe (for later visualization or debugging) caption_data = pd.DataFrame.from_dict(data) del caption_data['id'] caption_data.sort_values(by='image_id', inplace=True) caption_data = caption_data.reset_index(drop=True) del_idx = [] for i, caption in enumerate(caption_data['caption']): caption = caption.replace('.','').replace(',','').replace("'","").replace('"','') caption = caption.replace('&','and').replace('(','').replace(")","").replace('-',' ') caption = " ".join(caption.split()) # replace multiple spaces caption_data.set_value(i, 'caption', caption.lower()) if len(caption.split(" ")) > max_length: del_idx.append(i) # delete captions if size is larger than max_length print "The number of captions before deletion: %d" %len(caption_data) caption_data = caption_data.drop(caption_data.index[del_idx]) caption_data = caption_data.reset_index(drop=True) print "The number of captions after deletion: %d" %len(caption_data) return caption_data def _build_vocab(annotations, threshold=1): counter = Counter() max_len = 0 for i, caption in enumerate(annotations['caption']): words = caption.split(' ') # caption contrains only lower-case words for w in words: counter[w] +=1 if len(caption.split(" ")) > max_len: max_len = len(caption.split(" ")) vocab = [word for word in counter if counter[word] >= threshold] print ('Filtered %d words to %d words with word count threshold %d.' 
% (len(counter), len(vocab), threshold)) word_to_idx = {u'<NULL>': 0, u'<START>': 1, u'<END>': 2} idx = 3 for word in vocab: word_to_idx[word] = idx idx += 1 print "Max length of caption: ", max_len return word_to_idx def _build_caption_vector(annotations, word_to_idx, max_length=15): n_examples = len(annotations) captions = np.ndarray((n_examples,max_length+2)).astype(np.int32) for i, caption in enumerate(annotations['caption']): words = caption.split(" ") # caption contrains only lower-case words cap_vec = [] cap_vec.append(word_to_idx['<START>']) for word in words: if word in word_to_idx: cap_vec.append(word_to_idx[word]) cap_vec.append(word_to_idx['<END>']) # pad short caption with the special null token '<NULL>' to make it fixed-size vector if len(cap_vec) < (max_length + 2): for j in range(max_length + 2 - len(cap_vec)): cap_vec.append(word_to_idx['<NULL>']) captions[i, :] = np.asarray(cap_vec) print "Finished building caption vectors" return captions def _build_file_names(annotations): image_file_names = [] id_to_idx = {} idx = 0 image_ids = annotations['image_id'] file_names = annotations['file_name'] for image_id, file_name in zip(image_ids, file_names): if not image_id in id_to_idx: id_to_idx[image_id] = idx image_file_names.append(file_name) idx += 1 file_names = np.asarray(image_file_names) return file_names, id_to_idx def _build_image_idxs(annotations, id_to_idx): image_idxs = np.ndarray(len(annotations), dtype=np.int32) image_ids = annotations['image_id'] for i, image_id in enumerate(image_ids): image_idxs[i] = id_to_idx[image_id] return image_idxs def main(): # batch size for extracting feature vectors from vggnet. batch_size = 100 # maximum length of caption(number of word). if caption is longer than max_length, deleted. max_length = 15 # if word occurs less than word_count_threshold in training dataset, the word index is special unknown token. 
word_count_threshold = 1 # vgg model path vgg_model_path = './data/imagenet-vgg-verydeep-19.mat' caption_file = 'data/annotations/captions_train2014.json' image_dir = 'image/%2014_resized/' # about 80000 images and 400000 captions for train dataset train_dataset = _process_caption_data(caption_file='data/annotations/captions_train2014.json', image_dir='image/train2014_resized/', max_length=max_length) # about 40000 images and 200000 captions val_dataset = _process_caption_data(caption_file='data/annotations/captions_val2014.json', image_dir='image/val2014_resized/', max_length=max_length) # about 4000 images and 20000 captions for val / test dataset val_cutoff = int(0.1 * len(val_dataset)) test_cutoff = int(0.2 * len(val_dataset)) print 'Finished processing caption data' save_pickle(train_dataset, 'data/train/train.annotations.pkl') save_pickle(val_dataset[:val_cutoff], 'data/val/val.annotations.pkl') save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True), 'data/test/test.annotations.pkl') for split in ['train', 'val', 'test']: annotations = load_pickle('./data/%s/%s.annotations.pkl' % (split, split)) if split == 'train': word_to_idx = _build_vocab(annotations=annotations, threshold=word_count_threshold) save_pickle(word_to_idx, './data/%s/word_to_idx.pkl' % split) captions = _build_caption_vector(annotations=annotations, word_to_idx=word_to_idx, max_length=max_length) save_pickle(captions, './data/%s/%s.captions.pkl' % (split, split)) file_names, id_to_idx = _build_file_names(annotations) save_pickle(file_names, './data/%s/%s.file.names.pkl' % (split, split)) image_idxs = _build_image_idxs(annotations, id_to_idx) save_pickle(image_idxs, './data/%s/%s.image.idxs.pkl' % (split, split)) # prepare reference captions to compute bleu scores later image_ids = {} feature_to_captions = {} i = -1 for caption, image_id in zip(annotations['caption'], annotations['image_id']): if not image_id in image_ids: image_ids[image_id] = 0 i += 1 feature_to_captions[i] = [] feature_to_captions[i].append(caption.lower() + ' .') save_pickle(feature_to_captions, './data/%s/%s.references.pkl' % (split, split)) print "Finished building %s caption dataset" %split # extract conv5_3 feature vectors vggnet = Vgg19(vgg_model_path) vggnet.build() with tf.Session() as sess: tf.initialize_all_variables().run() for split in ['train', 'val', 'test']: anno_path = './data/%s/%s.annotations.pkl' % (split, split) save_path = './data/%s/%s.features.hkl' % (split, split) annotations = load_pickle(anno_path) image_path = list(annotations['file_name'].unique()) n_examples = len(image_path) all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32) for start, end in zip(range(0, n_examples, batch_size), range(batch_size, n_examples + batch_size, batch_size)): image_batch_file = image_path[start:end] image_batch = np.array(map(lambda x: ndimage.imread(x, mode='RGB'), image_batch_file)).astype( np.float32) feats = sess.run(vggnet.features, feed_dict={vggnet.images: image_batch}) all_feats[start:end, :] = feats print ("Processed %d %s features.." % (end, split)) # use hickle to save huge feature vectors hickle.dump(all_feats, save_path) print ("Saved %s.." % (save_path)) if __name__ == "__main__": main()
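# --- Illustrative sketch (not part of the file above) ------------------------
# _build_caption_vector() above maps each caption to a fixed-length vector of
# word indices: <START>, the word ids, <END>, then <NULL> padding up to
# max_length + 2.  The standalone helper below (the name `encode_caption` and
# the toy vocabulary are hypothetical, for illustration only) shows the same
# encoding rule for a single caption, without the pandas/batch machinery.
import numpy as np


def encode_caption(caption, word_to_idx, max_length=15):
    """Encode one lower-cased caption as a length-(max_length + 2) int vector.

    Assumes the caption has already been filtered to at most max_length words,
    as done by _process_caption_data() above.
    """
    cap_vec = [word_to_idx['<START>']]
    for word in caption.split(' '):
        if word in word_to_idx:          # unknown words are simply skipped
            cap_vec.append(word_to_idx[word])
    cap_vec.append(word_to_idx['<END>'])
    # Pad with <NULL> so every caption vector has the same length.
    cap_vec += [word_to_idx['<NULL>']] * (max_length + 2 - len(cap_vec))
    return np.asarray(cap_vec, dtype=np.int32)


if __name__ == '__main__':
    toy_vocab = {'<NULL>': 0, '<START>': 1, '<END>': 2,
                 'a': 3, 'dog': 4, 'runs': 5}
    print(encode_caption('a dog runs', toy_vocab, max_length=5))
    # -> [1 3 4 5 2 0 0]
# -----------------------------------------------------------------------------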
mit
johankaito/fufuka
microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py
17
69089
# # Author: Joris Vankerschaver 2013 # from __future__ import division, print_function, absolute_import import numpy as np import scipy.linalg from scipy.misc import doccer from scipy.special import gammaln, psi, multigammaln from scipy._lib._util import check_random_state __all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart'] _LOG_2PI = np.log(2 * np.pi) _LOG_2 = np.log(2) _LOG_PI = np.log(np.pi) def _process_parameters(dim, mean, cov): """ Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix. """ # Try to infer dimensionality if dim is None: if mean is None: if cov is None: dim = 1 else: cov = np.asarray(cov, dtype=float) if cov.ndim < 2: dim = 1 else: dim = cov.shape[0] else: mean = np.asarray(mean, dtype=float) dim = mean.size else: if not np.isscalar(dim): raise ValueError("Dimension of random variable must be a scalar.") # Check input sizes and return full arrays for mean and cov if necessary if mean is None: mean = np.zeros(dim) mean = np.asarray(mean, dtype=float) if cov is None: cov = 1.0 cov = np.asarray(cov, dtype=float) if dim == 1: mean.shape = (1,) cov.shape = (1, 1) if mean.ndim != 1 or mean.shape[0] != dim: raise ValueError("Array 'mean' must be a vector of length %d." % dim) if cov.ndim == 0: cov = cov * np.eye(dim) elif cov.ndim == 1: cov = np.diag(cov) elif cov.ndim == 2 and cov.shape != (dim, dim): rows, cols = cov.shape if rows != cols: msg = ("Array 'cov' must be square if it is two dimensional," " but cov.shape = %s." % str(cov.shape)) else: msg = ("Dimension mismatch: array 'cov' is of shape %s," " but 'mean' is a vector of length %d.") msg = msg % (str(cov.shape), len(mean)) raise ValueError(msg) elif cov.ndim > 2: raise ValueError("Array 'cov' must be at most two-dimensional," " but cov.ndim = %d" % cov.ndim) return dim, mean, cov def _process_quantiles(x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x[np.newaxis] elif x.ndim == 1: if dim == 1: x = x[:, np.newaxis] else: x = x[np.newaxis, :] return x def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): """ Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility. """ if rcond is not None: cond = rcond if cond in [None, -1]: t = spectrum.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps eps = cond * np.max(abs(spectrum)) return eps def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. 
Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) class _PSD(object): """ Compute coordinated functions of a symmetric positive semidefinite matrix. This class addresses two issues. Firstly it allows the pseudoinverse, the logarithm of the pseudo-determinant, and the rank of the matrix to be computed using one call to eigh instead of three. Secondly it allows these functions to be computed in a way that gives mutually compatible results. All of the functions are computed with a common understanding as to which of the eigenvalues are to be considered negligibly small. The functions are designed to coordinate with scipy.linalg.pinvh() but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). Parameters ---------- M : array_like Symmetric positive semidefinite matrix (2-D). cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : bool, optional Whether the pertinent array data is taken from the lower or upper triangle of M. (Default: lower) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. allow_singular : bool, optional Whether to allow a singular matrix. (Default: True) Notes ----- The arguments are similar to those of scipy.linalg.pinvh(). """ def __init__(self, M, cond=None, rcond=None, lower=True, check_finite=True, allow_singular=True): # Compute the symmetric eigendecomposition. # Note that eigh takes care of array conversion, chkfinite, # and assertion that the matrix is square. s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) eps = _eigvalsh_to_eps(s, cond, rcond) if np.min(s) < -eps: raise ValueError('the input matrix must be positive semidefinite') d = s[s > eps] if len(d) < len(s) and not allow_singular: raise np.linalg.LinAlgError('singular matrix') s_pinv = _pinv_1d(s, eps) U = np.multiply(u, np.sqrt(s_pinv)) # Initialize the eagerly precomputed attributes. self.rank = len(d) self.U = U self.log_pdet = np.sum(np.log(d)) # Initialize an attribute to be lazily computed. self._pinv = None @property def pinv(self): if self._pinv is None: self._pinv = np.dot(self.U, self.U.T) return self._pinv _doc_default_callparams = """\ mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional Whether to allow a singular covariance matrix. (Default: False) """ _doc_callparams_note = \ """Setting the parameter `mean` to `None` is equivalent to having `mean` be the zero-vector. The parameter `cov` can be a scalar, in which case the covariance matrix is the identity times that value, a vector of diagonal entries for the covariance matrix, or a two-dimensional array_like. """ _doc_random_state = """\ random_state : None or int or np.random.RandomState instance, optional If int or RandomState, use it for drawing the random variates. If None (or np.random), the global np.random state is used. Default is None. 
""" _doc_frozen_callparams = "" _doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" docdict_params = { '_doc_default_callparams': _doc_default_callparams, '_doc_callparams_note': _doc_callparams_note, '_doc_random_state': _doc_random_state } docdict_noparams = { '_doc_default_callparams': _doc_frozen_callparams, '_doc_callparams_note': _doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class multi_rv_generic(object): """ Class which encapsulates common functionality between all multivariate distributions. """ def __init__(self, seed=None): super(multi_rv_generic, self).__init__() self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def _get_random_state(self, random_state): if random_state is not None: return check_random_state(random_state) else: return self._random_state class multi_rv_frozen(object): """ Class which encapsulates common functionality between all frozen multivariate distributions. """ @property def random_state(self): return self._dist._random_state @random_state.setter def random_state(self, seed): self._dist._random_state = check_random_state(seed) class multivariate_normal_gen(multi_rv_generic): r""" A multivariate normal random variable. The `mean` keyword specifies the mean. The `cov` keyword specifies the covariance matrix. Methods ------- ``pdf(x, mean=None, cov=1, allow_singular=False)`` Probability density function. ``logpdf(x, mean=None, cov=1, allow_singular=False)`` Log of the probability density function. ``rvs(mean=None, cov=1, size=1, random_state=None)`` Draw random samples from a multivariate normal distribution. ``entropy()`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the mean and covariance parameters, returning a "frozen" multivariate normal random variable: rv = multivariate_normal(mean=None, cov=1, allow_singular=False) - Frozen object with the same methods but holding the given mean and covariance fixed. Notes ----- %(_doc_callparams_note)s The covariance matrix `cov` must be a (symmetric) positive semi-definite matrix. The determinant and inverse of `cov` are computed as the pseudo-determinant and pseudo-inverse, respectively, so that `cov` does not need to have full rank. The probability density function for `multivariate_normal` is .. math:: f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, and :math:`k` is the dimension of the space where :math:`x` takes values. .. 
versionadded:: 0.14.0 Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import multivariate_normal >>> x = np.linspace(0, 5, 10, endpoint=False) >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) >>> fig1 = plt.figure() >>> ax = fig1.add_subplot(111) >>> ax.plot(x, y) The input quantiles can be any shape of array, as long as the last axis labels the components. This allows us for instance to display the frozen pdf for a non-isotropic random variable in 2D as follows: >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] >>> pos = np.empty(x.shape + (2,)) >>> pos[:, :, 0] = x; pos[:, :, 1] = y >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) >>> fig2 = plt.figure() >>> ax2 = fig2.add_subplot(111) >>> ax2.contourf(x, y, rv.pdf(pos)) """ def __init__(self, seed=None): super(multivariate_normal_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, docdict_params) def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. See `multivariate_normal_frozen` for more information. """ return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed) def _logpdf(self, x, mean, prec_U, log_det_cov, rank): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ dev = x - mean maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) def logpdf(self, x, mean, cov, allow_singular=False): """ Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) return _squeeze_output(out) def pdf(self, x, mean, cov, allow_singular=False): """ Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) return _squeeze_output(out) def rvs(self, mean=None, cov=1, size=1, random_state=None): """ Draw random samples from a multivariate normal distribution. Parameters ---------- %(_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). 
%(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) random_state = self._get_random_state(random_state) out = random_state.multivariate_normal(mean, cov, size) return _squeeze_output(out) def entropy(self, mean=None, cov=1): """ Compute the differential entropy of the multivariate normal. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov) return 0.5 * logdet multivariate_normal = multivariate_normal_gen() class multivariate_normal_frozen(multi_rv_frozen): def __init__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]]) """ self.dim, self.mean, self.cov = _process_parameters(None, mean, cov) self.cov_info = _PSD(self.cov, allow_singular=allow_singular) self._dist = multivariate_normal_gen(seed) def logpdf(self, x): x = _process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.mean, self.cov_info.U, self.cov_info.log_pdet, self.cov_info.rank) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.mean, self.cov, size, random_state) def entropy(self): """ Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution """ log_pdet = self.cov_info.log_pdet rank = self.cov_info.rank return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs']: method = multivariate_normal_gen.__dict__[name] method_frozen = multivariate_normal_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, docdict_params) _dirichlet_doc_default_callparams = """\ alpha : array_like The concentration parameters. The number of entries determines the dimensionality of the distribution. 
""" _dirichlet_doc_frozen_callparams = "" _dirichlet_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" dirichlet_docdict_params = { '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, '_doc_random_state': _doc_random_state } dirichlet_docdict_noparams = { '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, '_doc_random_state': _doc_random_state } def _dirichlet_check_parameters(alpha): alpha = np.asarray(alpha) if np.min(alpha) <= 0: raise ValueError("All parameters must be greater than 0") elif alpha.ndim != 1: raise ValueError("Parameter vector 'a' must be one dimensional, " + "but a.shape = %s." % str(alpha.shape)) return alpha def _dirichlet_check_input(alpha, x): x = np.asarray(x) if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: raise ValueError("Vector 'x' must have one entry less then the" + " parameter vector 'a', but alpha.shape = " + "%s and " % alpha.shape + "x.shape = %s." % x.shape) if x.shape[0] != alpha.shape[0]: xk = np.array([1 - np.sum(x, 0)]) if xk.ndim == 1: x = np.append(x, xk) elif xk.ndim == 2: x = np.vstack((x, xk)) else: raise ValueError("The input must be one dimensional or a two " "dimensional matrix containing the entries.") if np.min(x) < 0: raise ValueError("Each entry in 'x' must be greater or equal zero.") if np.max(x) > 1: raise ValueError("Each entry in 'x' must be smaller or equal one.") if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): raise ValueError("The input vector 'x' must lie within the normal " + "simplex. but sum(x)=%f." % np.sum(x, 0)) return x def _lnB(alpha): r""" Internal helper function to compute the log of the useful quotient .. math:: B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)} Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- B : scalar Helper quotient, internal use only """ return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) class dirichlet_gen(multi_rv_generic): r""" A Dirichlet random variable. The `alpha` keyword specifies the concentration parameters of the distribution. .. versionadded:: 0.15.0 Methods ------- ``pdf(x, alpha)`` Probability density function. ``logpdf(x, alpha)`` Log of the probability density function. ``rvs(alpha, size=1, random_state=None)`` Draw random samples from a Dirichlet distribution. ``mean(alpha)`` The mean of the Dirichlet distribution ``var(alpha)`` The variance of the Dirichlet distribution ``entropy(alpha)`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix concentration parameters, returning a "frozen" Dirichlet random variable: rv = dirichlet(alpha) - Frozen object with the same methods but holding the given concentration parameters fixed. Notes ----- Each :math:`\alpha` entry must be positive. The distribution has only support on the simplex defined by .. math:: \sum_{i=1}^{K} x_i \le 1 The probability density function for `dirichlet` is .. math:: f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} where .. 
math:: \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the concentration parameters and :math:`K` is the dimension of the space where :math:`x` takes values. """ def __init__(self, seed=None): super(dirichlet_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) def __call__(self, alpha, seed=None): return dirichlet_frozen(alpha, seed=seed) def _logpdf(self, x, alpha): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function %(_dirichlet_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ lnB = _lnB(alpha) return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0) def logpdf(self, x, alpha): """ Log of the Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = self._logpdf(x, alpha) return _squeeze_output(out) def pdf(self, x, alpha): """ The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray The probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = np.exp(self._logpdf(x, alpha)) return _squeeze_output(out) def mean(self, alpha): """ Compute the mean of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- mu : scalar Mean of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) out = alpha / (np.sum(alpha)) return _squeeze_output(out) def var(self, alpha): """ Compute the variance of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- v : scalar Variance of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) return out def entropy(self, alpha): """ Compute the differential entropy of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- h : scalar Entropy of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) lnB = _lnB(alpha) K = alpha.shape[0] out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( (alpha - 1) * scipy.special.psi(alpha)) return _squeeze_output(out) def rvs(self, alpha, size=1, random_state=None): """ Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. 
""" alpha = _dirichlet_check_parameters(alpha) random_state = self._get_random_state(random_state) return random_state.dirichlet(alpha, size=size) dirichlet = dirichlet_gen() class dirichlet_frozen(multi_rv_frozen): def __init__(self, alpha, seed=None): self.alpha = _dirichlet_check_parameters(alpha) self._dist = dirichlet_gen(seed) def logpdf(self, x): return self._dist.logpdf(x, self.alpha) def pdf(self, x): return self._dist.pdf(x, self.alpha) def mean(self): return self._dist.mean(self.alpha) def var(self): return self._dist.var(self.alpha) def entropy(self): return self._dist.entropy(self.alpha) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.alpha, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']: method = dirichlet_gen.__dict__[name] method_frozen = dirichlet_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, dirichlet_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) _wishart_doc_default_callparams = """\ df : int Degrees of freedom, must be greater than or equal to dimension of the scale matrix scale : array_like Symmetric positive definite scale matrix of the distribution """ _wishart_doc_callparams_note = "" _wishart_doc_frozen_callparams = "" _wishart_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" wishart_docdict_params = { '_doc_default_callparams': _wishart_doc_default_callparams, '_doc_callparams_note': _wishart_doc_callparams_note, '_doc_random_state': _doc_random_state } wishart_docdict_noparams = { '_doc_default_callparams': _wishart_doc_frozen_callparams, '_doc_callparams_note': _wishart_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class wishart_gen(multi_rv_generic): r""" A Wishart random variable. The `df` keyword specifies the degrees of freedom. The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal precision matrix (the inverse of the covariance matrix). Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from a Wishart distribution. ``entropy()`` Compute the differential entropy of the Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" Wishart random variable: rv = wishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- invwishart, chi2 Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The Wishart distribution is often denoted .. math:: W_p(\nu, \Sigma) where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the :math:`p \times p` scale matrix. 
The probability density function for `wishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} \exp\left( -tr(\Sigma^{-1} S) / 2 \right) If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). If the scale matrix is 1-dimensional and equal to one, then the Wishart distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` distribution. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import wishart, chi2 >>> x = np.linspace(1e-5, 8, 100) >>> w = wishart.pdf(x, df=3, scale=1); w[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> c = chi2.pdf(x, 3); c[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> plt.plot(x, w) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(wishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen Wishart distribution. See `wishart_frozen` for more information. """ return wishart_frozen(df, scale, seed) def _process_parameters(self, df, scale): if scale is None: scale = 1.0 scale = np.asarray(scale, dtype=float) if scale.ndim == 0: scale = scale[np.newaxis,np.newaxis] elif scale.ndim == 1: scale = np.diag(scale) elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: raise ValueError("Array 'scale' must be square if it is two" " dimensional, but scale.scale = %s." % str(scale.shape)) elif scale.ndim > 2: raise ValueError("Array 'scale' must be at most two-dimensional," " but scale.ndim = %d" % scale.ndim) dim = scale.shape[0] if df is None: df = dim elif not np.isscalar(df): raise ValueError("Degrees of freedom must be a scalar.") elif df < dim: raise ValueError("Degrees of freedom cannot be less than dimension" " of scale matrix, but df = %d" % df) return dim, df, scale def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x * np.eye(dim)[:, :, np.newaxis] if x.ndim == 1: if dim == 1: x = x[np.newaxis, np.newaxis, :] else: x = np.diag(x)[:, :, np.newaxis] elif x.ndim == 2: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square if they are two" " dimensional, but x.shape = %s." % str(x.shape)) x = x[:, :, np.newaxis] elif x.ndim == 3: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square in the first two" " dimensions if they are three dimensional" ", but x.shape = %s." % str(x.shape)) elif x.ndim > 3: raise ValueError("Quantiles must be at most two-dimensional with" " an additional dimension for multiple" "components, but x.ndim = %d" % x.ndim) # Now we have 3-dim array; should have shape [dim, dim, *] if not x.shape[0:2] == (dim, dim): raise ValueError('Quantiles have incompatible dimensions: should' ' be %s, got %s.' 
% ((dim, dim), x.shape[0:2])) return x def _process_size(self, size): size = np.asarray(size) if size.ndim == 0: size = size[np.newaxis] elif size.ndim > 1: raise ValueError('Size must be an integer or tuple of integers;' ' thus must have dimension <= 1.' ' Got size.ndim = %s' % str(tuple(size))) n = size.prod() shape = tuple(size) return n, shape def _logpdf(self, x, dim, df, scale, log_det_scale, C): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triagular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ # log determinant of x # Note: x has components along the last axis, so that x.T has # components alone the 0-th axis. Then since det(A) = det(A'), this # gives us a 1-dim vector of determinants # Retrieve tr(scale^{-1} x) log_det_x = np.zeros(x.shape[-1]) scale_inv_x = np.zeros(x.shape) tr_scale_inv_x = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): _, log_det_x[i] = self._cholesky_logdet(x[:,:,i]) scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i]) tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace() # Log PDF out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + multigammaln(0.5*df, dim))) return out def logpdf(self, x, df, scale): """ Log of the Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) # Cholesky decomposition of scale, get log(det(scale)) C, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale, C) return _squeeze_output(out) def pdf(self, x, df, scale): """ Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ return df * scale def mean(self, df, scale): """ Mean of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. 
""" if df >= dim + 1: out = (df-dim-1) * scale else: out = None return out def mode(self, df, scale): """ Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) if out is not None else out def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. """ var = scale**2 diag = scale.diagonal() # 1 x dim array var += np.outer(diag, diag) var *= df return var def var(self, df, scale): """ Variance of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) def _standard_rvs(self, n, shape, dim, df, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : np.random.RandomState instance RandomState used for drawing the random variates. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ # Random normal variates for off-diagonal elements n_tril = dim * (dim-1) // 2 covariances = random_state.normal( size=n*n_tril).reshape(shape+(n_tril,)) # Random chi-square variates for diagonal elements variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T # Create the A matri(ces) - lower triangular A = np.zeros(shape + (dim, dim)) # Input the covariances size_idx = tuple([slice(None,None,None)]*len(shape)) tril_idx = np.tril_indices(dim, k=-1) A[size_idx + tril_idx] = covariances # Input the variances diag_idx = np.diag_indices(dim) A[size_idx + diag_idx] = variances return A def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Calculate the matrices A, which are actually lower triangular # Cholesky factorizations of a matrix B such that B ~ W(df, I) A = self._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = C A A' C', where SA ~ W(df, scale) # Note: this is the product of a (lower) (lower) (lower)' (lower)' # or, denoting B = AA', it is C B C' where C is the lower # triangular Cholesky factorization of the scale matrix. # this appears to conflict with the instructions in [1]_, which # suggest that it should be D' B D where D is the lower # triangular factorization of the scale matrix. 
However, it is # meant to refer to the Bartlett (1933) representation of a # Wishart random variate as L A A' L' where L is lower triangular # so it appears that understanding D' to be upper triangular # is either a typo in or misreading of [1]_. for index in np.ndindex(shape): CA = np.dot(C, A[index]) A[index] = np.dot(CA, CA.T) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from a Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Cholesky decomposition of scale C = scipy.linalg.cholesky(scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def _entropy(self, dim, df, log_det_scale): """ Parameters ---------- dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'entropy' instead. """ return ( 0.5 * (dim+1) * log_det_scale + 0.5 * dim * (dim+1) * _LOG_2 + multigammaln(0.5*df, dim) - 0.5 * (df - dim - 1) * np.sum( [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] ) + 0.5 * df * dim ) def entropy(self, df, scale): """ Compute the differential entropy of the Wishart. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Wishart distribution Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) _, log_det_scale = self._cholesky_logdet(scale) return self._entropy(dim, df, log_det_scale) def _cholesky_logdet(self, scale): """ Compute Cholesky decomposition and determine (log(det(scale)). Parameters ---------- scale : ndarray Scale matrix. Returns ------- c_decomp : ndarray The Cholesky decomposition of `scale`. logdet : scalar The log of the determinant of `scale`. Notes ----- This computation of ``logdet`` is equivalent to ``np.linalg.slogdet(scale)``. It is ~2x faster though. """ c_decomp = scipy.linalg.cholesky(scale, lower=True) logdet = 2 * np.sum(np.log(c_decomp.diagonal())) return c_decomp, logdet wishart = wishart_gen() class wishart_frozen(multi_rv_frozen): """ Create a frozen Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" def __init__(self, df, scale, seed=None): self._dist = wishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale) self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale, self.C) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): return self._dist._entropy(self.dim, self.df, self.log_det_scale) # Set frozen generator docstrings from corresponding docstrings in # Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: method = wishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) from numpy import asarray_chkfinite, asarray from scipy.linalg.misc import LinAlgError from scipy.linalg.lapack import get_lapack_funcs def _cho_inv_batch(a, check_finite=True): """ Invert the matrices a_i, using a Cholesky factorization of A, where a_i resides in the last two dimensions of a and the other indices describe the index i. Overwrites the data in a. Parameters ---------- a : array Array of matrices to invert, where the matrices themselves are stored in the last two dimensions. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array Array of inverses of the matrices ``a_i``. See also -------- scipy.linalg.cholesky : Cholesky factorization of a matrix """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]: raise ValueError('expected square matrix in last two dimensions') potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,)) tril_idx = np.tril_indices(a.shape[-2], k=-1) triu_idx = np.triu_indices(a.shape[-2], k=1) for index in np.ndindex(a1.shape[:-2]): # Cholesky decomposition a1[index], info = potrf(a1[index], lower=True, overwrite_a=False, clean=False) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Inversion a1[index], info = potri(a1[index], lower=True, overwrite_c=False) if info > 0: raise LinAlgError("the inverse could not be computed") if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Make symmetric (dpotri only fills in the lower triangle) a1[index][triu_idx] = a1[index][tril_idx] return a1 class invwishart_gen(wishart_gen): r""" An inverse Wishart random variable. The `df` keyword specifies the degrees of freedom. 
The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal covariance matrix. Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from an inverse Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" inverse Wishart random variable: rv = invwishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- wishart Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The inverse Wishart distribution is often denoted .. math:: W_p^{-1}(\nu, \Psi) where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the :math:`p \times p` scale matrix. The probability density function for `invwishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} \exp\left( -tr(\Sigma S^{-1}) / 2 \right) If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). If the scale matrix is 1-dimensional and equal to one, then the inverse Wishart distribution :math:`W_1(\nu, 1)` collapses to the inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` and scale = :math:`\frac{1}{2}`. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import invwishart, invgamma >>> x = np.linspace(0.01, 1, 100) >>> iw = invwishart.pdf(x, df=6, scale=1) >>> iw[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> ig = invgamma.pdf(x, 6/2., scale=1./2) >>> ig[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> plt.plot(x, iw) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(invwishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen inverse Wishart distribution. See `invwishart_frozen` for more information. """ return invwishart_frozen(df, scale, seed) def _logpdf(self, x, dim, df, scale, log_det_scale): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. 
""" log_det_x = np.zeros(x.shape[-1]) #scale_x_inv = np.zeros(x.shape) x_inv = np.copy(x).T if dim > 1: _cho_inv_batch(x_inv) # works in-place else: x_inv = 1./x_inv tr_scale_x_inv = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True) log_det_x[i] = 2 * np.sum(np.log(C.diagonal())) #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace() # Log PDF out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - multigammaln(0.5*df, dim)) return out def logpdf(self, x, df, scale): """ Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) _, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale) return _squeeze_output(out) def pdf(self, x, df, scale): """ Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ if df > dim + 1: out = scale / (df - dim - 1) else: out = None return out def mean(self, df, scale): """ Mean of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) if out is not None else out def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. """ return scale / (df + dim + 1) def mode(self, df, scale): """ Mode of the inverse Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. 
""" if df > dim + 3: var = (df - dim + 1) * scale**2 diag = scale.diagonal() # 1 x dim array var += (df - dim - 1) * np.outer(diag, diag) var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) else: var = None return var def var(self, df, scale): """ Variance of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) if out is not None else out def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triagular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Get random draws A such that A ~ W(df, I) A = super(invwishart_gen, self)._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) eye = np.eye(dim) trtrs = get_lapack_funcs(('trtrs'), (A,)) for index in np.ndindex(A.shape[:-2]): # Calculate CA CA = np.dot(C, A[index]) # Get (C A)^{-1} via triangular solver if dim > 1: CA, info = trtrs(CA, eye, lower=True) if info > 0: raise LinAlgError("Singular matrix.") if info < 0: raise ValueError('Illegal value in %d-th argument of' ' internal trtrs' % -info) else: CA = 1. / CA # Get SA A[index] = np.dot(CA.T, CA) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from an inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Invert the scale eye = np.eye(dim) L, lower = scipy.linalg.cho_factor(scale, lower=True) inv_scale = scipy.linalg.cho_solve((L, lower), eye) # Cholesky decomposition of inverted scale C = scipy.linalg.cholesky(inv_scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError invwishart = invwishart_gen() class invwishart_frozen(multi_rv_frozen): def __init__(self, df, scale, seed=None): """ Create a frozen inverse Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" self._dist = invwishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale ) # Get the determinant via Cholesky factorization C, lower = scipy.linalg.cho_factor(self.scale, lower=True) self.log_det_scale = 2 * np.sum(np.log(C.diagonal())) # Get the inverse using the Cholesky factorization eye = np.eye(self.dim) self.inv_scale = scipy.linalg.cho_solve((C, lower), eye) # Get the Cholesky factorization of the inverse scale self.C = scipy.linalg.cholesky(self.inv_scale, lower=True) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError # Set frozen generator docstrings from corresponding docstrings in # inverse Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: method = invwishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
apache-2.0
JesseLivezey/plankton
pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py
5
4839
""" WRITEME """ import logging from ..linear import LinearTransform from .unshared_conv import FilterActs, ImgActs from theano.compat.six.moves import xrange from theano.sandbox import cuda if cuda.cuda_available: import gpu_unshared_conv # register optimizations import numpy as np try: import matplotlib.pyplot as plt except ImportError: pass logger = logging.getLogger(__name__) class LocalDot(LinearTransform): """ LocalDot is an linear operation computationally similar to convolution in the spatial domain, except that whereas convolution applying a single filter or set of filters across an image, the LocalDot has different filterbanks for different points in the image. Mathematically, this is a general linear transform except for a restriction that filters are 0 outside of a spatially localized patch within the image. Image shape is 5-tuple: color_groups colors_per_group rows cols images Filterbank shape is 7-tuple (!) 0 row_positions 1 col_positions 2 colors_per_group 3 height 4 width 5 color_groups 6 filters_per_group The result of left-multiplication a 5-tuple with shape: filter_groups filters_per_group row_positions col_positions images Parameters ---------- filters : WRITEME irows : WRITEME Image rows icols : WRITEME Image columns subsample : WRITEME padding_start : WRITEME filters_shape : WRITEME message : WRITEME """ def __init__(self, filters, irows, icols=None, subsample=(1, 1), padding_start=None, filters_shape=None, message=""): LinearTransform.__init__(self, [filters]) self._filters = filters if filters_shape is None: self._filters_shape = tuple(filters.get_value(borrow=True).shape) else: self._filters_shape = tuple(filters_shape) self._irows = irows if icols is None: self._icols = irows else: self._icols = icols if self._icols != self._irows: raise NotImplementedError('GPU code at least needs square imgs') self._subsample = tuple(subsample) self._padding_start = padding_start if len(self._filters_shape) != 7: raise TypeError('need 7-tuple filter shape', self._filters_shape) if self._subsample[0] != self._subsample[1]: raise ValueError('subsampling must be same in rows and cols') self._filter_acts = FilterActs(self._subsample[0]) self._img_acts = ImgActs(module_stride=self._subsample[0]) if message: self._message = message else: self._message = filters.name def rmul(self, x): """ .. todo:: WRITEME """ assert x.ndim == 5 return self._filter_acts(x, self._filters) def rmul_T(self, x): """ .. todo:: WRITEME """ return self._img_acts(self._filters, x, self._irows, self._icols) def col_shape(self): """ .. todo:: WRITEME """ ishape = self.row_shape() + (-99,) fshape = self._filters_shape hshape, = self._filter_acts.infer_shape(None, (ishape, fshape)) assert hshape[-1] == -99 return hshape[:-1] def row_shape(self): """ .. todo:: WRITEME """ fshape = self._filters_shape fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2] fgroups, filters_per_group = fshape[-2:] return fgroups, fcolors, self._irows, self._icols def print_status(self): """ .. todo:: WRITEME """ raise NotImplementedError("TODO: fix dependence on non-existent " "ndarray_status function") """print ndarray_status( self._filters.get_value(borrow=True), msg='%s{%s}'% (self.__class__.__name__, self._message)) """ def imshow_gray(self): """ .. 
todo:: WRITEME """ filters = self._filters.get_value() modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape logger.info(filters.shape) rval = np.zeros(( modR * (rows + 1) - 1, modC * (cols + 1) - 1, )) for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)): for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)): rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0] plt.imshow(rval, cmap='gray') return rval
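# ----------------------------------------------------------------------------
# Editorial usage sketch -- not part of the original pylearn2 file.  It only
# illustrates how the 7-tuple filterbank shape documented above maps onto a
# LocalDot instance; the concrete sizes and the Theano shared variable are
# assumptions made for the example.
def _example_localdot_shapes():
    import theano

    fmodulesR = fmodulesC = 4          # filter positions (rows, cols)
    fcolors, frows, fcols = 1, 3, 3    # colors per group, patch height, width
    fgroups, fpg = 1, 8                # color groups, filters per group

    filters = theano.shared(
        np.zeros((fmodulesR, fmodulesC, fcolors, frows, fcols, fgroups, fpg),
                 dtype='float32'),
        name='example_filters')

    ld = LocalDot(filters, irows=8, icols=8)
    # row_shape() describes the image side of the transform:
    # (color_groups, colors_per_group, rows, cols); the trailing images axis
    # of the 5-tensor passed to rmul() is implicit.
    return ld.row_shape()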
bsd-3-clause
waynenilsen/statsmodels
examples/python/robust_models_0.py
33
2992
## Robust Linear Models from __future__ import print_function import numpy as np import statsmodels.api as sm import matplotlib.pyplot as plt from statsmodels.sandbox.regression.predstd import wls_prediction_std # ## Estimation # # Load data: data = sm.datasets.stackloss.load() data.exog = sm.add_constant(data.exog) # Huber's T norm with the (default) median absolute deviation scaling huber_t = sm.RLM(data.endog, data.exog, M=sm.robust.norms.HuberT()) hub_results = huber_t.fit() print(hub_results.params) print(hub_results.bse) print(hub_results.summary(yname='y', xname=['var_%d' % i for i in range(len(hub_results.params))])) # Huber's T norm with 'H2' covariance matrix hub_results2 = huber_t.fit(cov="H2") print(hub_results2.params) print(hub_results2.bse) # Andrew's Wave norm with Huber's Proposal 2 scaling and 'H3' covariance matrix andrew_mod = sm.RLM(data.endog, data.exog, M=sm.robust.norms.AndrewWave()) andrew_results = andrew_mod.fit(scale_est=sm.robust.scale.HuberScale(), cov="H3") print('Parameters: ', andrew_results.params) # See ``help(sm.RLM.fit)`` for more options and ``module sm.robust.scale`` for scale options # # ## Comparing OLS and RLM # # Artificial data with outliers: nsample = 50 x1 = np.linspace(0, 20, nsample) X = np.column_stack((x1, (x1-5)**2)) X = sm.add_constant(X) sig = 0.3 # smaller error variance makes OLS<->RLM contrast bigger beta = [5, 0.5, -0.0] y_true2 = np.dot(X, beta) y2 = y_true2 + sig*1. * np.random.normal(size=nsample) y2[[39,41,43,45,48]] -= 5 # add some outliers (10% of nsample) # ### Example 1: quadratic function with linear truth # # Note that the quadratic term in OLS regression will capture outlier effects. res = sm.OLS(y2, X).fit() print(res.params) print(res.bse) print(res.predict()) # Estimate RLM: resrlm = sm.RLM(y2, X).fit() print(resrlm.params) print(resrlm.bse) # Draw a plot to compare OLS estimates to the robust estimates: fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(x1, y2, 'o',label="data") ax.plot(x1, y_true2, 'b-', label="True") prstd, iv_l, iv_u = wls_prediction_std(res) ax.plot(x1, res.fittedvalues, 'r-', label="OLS") ax.plot(x1, iv_u, 'r--') ax.plot(x1, iv_l, 'r--') ax.plot(x1, resrlm.fittedvalues, 'g.-', label="RLM") ax.legend(loc="best") # ### Example 2: linear function with linear truth # # Fit a new OLS model using only the linear term and the constant: X2 = X[:,[0,1]] res2 = sm.OLS(y2, X2).fit() print(res2.params) print(res2.bse) # Estimate RLM: resrlm2 = sm.RLM(y2, X2).fit() print(resrlm2.params) print(resrlm2.bse) # Draw a plot to compare OLS estimates to the robust estimates: prstd, iv_l, iv_u = wls_prediction_std(res2) fig, ax = plt.subplots() ax.plot(x1, y2, 'o', label="data") ax.plot(x1, y_true2, 'b-', label="True") ax.plot(x1, res2.fittedvalues, 'r-', label="OLS") ax.plot(x1, iv_u, 'r--') ax.plot(x1, iv_l, 'r--') ax.plot(x1, resrlm2.fittedvalues, 'g.-', label="RLM") ax.legend(loc="best")
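# Editorial addition -- not part of the original example.  A hedged summary of
# example 2 that prints the OLS and RLM coefficient estimates side by side, so
# the reduced influence of the outliers under RLM is easy to see; the parameter
# names are illustrative, while `res2` and `resrlm2` are the fits from above.

print("\nExample 2 coefficient comparison (OLS vs. RLM):")
print("%-8s %12s %12s" % ("param", "OLS", "RLM"))
for name, b_ols, b_rlm in zip(("const", "x1"), res2.params, resrlm2.params):
    print("%-8s %12.4f %12.4f" % (name, b_ols, b_rlm))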
bsd-3-clause
felipessalvatore/CNNexample
src/tunning/fc.py
1
2217
import os import sys from random import randint import numpy as np import inspect import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) from util import run_test, get_data_4d, get_time from CNN import CNNModel, train_model, check_valid from DataHolder import DataHolder from Config import Config train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d() my_dataholder = DataHolder(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels) FC = [5, 10, 15, 20, 30, 40, 60, 200] number_of_exp = len(FC) results = [] duration = [] info = [] for i, fc in enumerate(FC): print("\n ({0} of {1})".format(i + 1, number_of_exp)) my_config = Config(tunning=True, hidden_nodes_1=3 * fc, hidden_nodes_2=2 * fc, hidden_nodes_3=fc) attrs = vars(my_config) config_info = ["%s: %s" % item for item in attrs.items()] info.append(config_info) my_model = CNNModel(my_config, my_dataholder) train_model(my_model, my_dataholder, 10001, 1000, False) current_dur = get_time(train_model, 10001) score = check_valid(my_model) results.append(score) duration.append(current_dur) best_result = max(list(zip(results, FC, duration, info))) result_string = """In an experiment with {0} fully connected sizes the best one is {1} with valid accuracy = {2}. \nThe training takes {3:.2f} seconds using the following params: \n{4}""".format(number_of_exp, best_result[1], best_result[0], best_result[2], best_result[3]) file = open("final.txt", "w") file.write(result_string) file.close() plt.plot(FC, results) plt.xlabel("hidden_nodes_3") plt.ylabel("valid acc") plt.savefig("fc.png") plt.clf() plt.plot(FC, duration) plt.xlabel("hidden_nodes_3") plt.ylabel("duration (s)") plt.savefig("fc_du.png") plt.clf()
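# Editorial addition -- not part of the original script.  A hedged sketch that
# persists the raw scan results next to the generated plots so the experiment
# can be re-analysed without re-training; the file name is an assumption, and
# FC, results and duration are the lists built above.
np.savetxt("fc_results.csv",
           np.column_stack((FC, results, duration)),
           delimiter=",",
           header="hidden_nodes_3,valid_acc,duration_s",
           comments="")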
mit
sjperkins/tensorflow
tensorflow/python/estimator/canned/dnn_linear_combined_test.py
5
26973
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dnn_linear_combined.py.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile import numpy as np import six from tensorflow.core.example import example_pb2 from tensorflow.core.example import feature_pb2 from tensorflow.python.estimator.canned import dnn_linear_combined from tensorflow.python.estimator.canned import dnn_testing_utils from tensorflow.python.estimator.canned import linear_testing_utils from tensorflow.python.estimator.canned import prediction_keys from tensorflow.python.estimator.export import export from tensorflow.python.estimator.inputs import numpy_io from tensorflow.python.estimator.inputs import pandas_io from tensorflow.python.feature_column import feature_column from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.ops import nn from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import variables as variables_lib from tensorflow.python.platform import gfile from tensorflow.python.platform import test from tensorflow.python.summary.writer import writer_cache from tensorflow.python.training import checkpoint_utils from tensorflow.python.training import gradient_descent from tensorflow.python.training import input as input_lib from tensorflow.python.training import optimizer as optimizer_lib try: # pylint: disable=g-import-not-at-top import pandas as pd HAS_PANDAS = True except IOError: # Pandas writes a temporary file during import. If it fails, don't use pandas. HAS_PANDAS = False except ImportError: HAS_PANDAS = False class DNNOnlyModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNModelFnTest.__init__(self, self._dnn_only_model_fn) def _dnn_only_model_fn(self, features, labels, mode, head, hidden_units, feature_columns, optimizer='Adagrad', activation_fn=nn.relu, dropout=None, input_layer_partitioner=None, config=None): return dnn_linear_combined._dnn_linear_combined_model_fn( features=features, labels=labels, mode=mode, head=head, linear_feature_columns=[], dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, dnn_activation_fn=activation_fn, dnn_dropout=dropout, input_layer_partitioner=input_layer_partitioner, config=config) # A function to mimic linear-regressor init reuse same tests. 
def _linear_regressor_fn(feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Ftrl', config=None, partitioner=None): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=partitioner, config=config) class LinearOnlyRegressorPartitionerTest( linear_testing_utils.BaseLinearRegressorPartitionerTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPartitionerTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorEvaluationTest( linear_testing_utils.BaseLinearRegressorEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorEvaluationTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorPredictTest( linear_testing_utils.BaseLinearRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorPredictTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorIntegrationTest( linear_testing_utils.BaseLinearRegressorIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorIntegrationTest.__init__( self, _linear_regressor_fn) class LinearOnlyRegressorTrainingTest( linear_testing_utils.BaseLinearRegressorTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearRegressorTrainingTest.__init__( self, _linear_regressor_fn) def _linear_classifier_fn(feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Ftrl', config=None, partitioner=None): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, linear_feature_columns=feature_columns, linear_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=partitioner, config=config) class LinearOnlyClassifierTrainingTest( linear_testing_utils.BaseLinearClassifierTrainingTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierTrainingTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierClassesEvaluationTest( linear_testing_utils.BaseLinearClassifierEvaluationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierEvaluationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierPredictTest( linear_testing_utils.BaseLinearClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierPredictTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class LinearOnlyClassifierIntegrationTest( 
linear_testing_utils.BaseLinearClassifierIntegrationTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) linear_testing_utils.BaseLinearClassifierIntegrationTest.__init__( self, linear_classifier_fn=_linear_classifier_fn) class DNNLinearCombinedRegressorIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, label_dimension, batch_size): linear_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] dnn_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, label_dimension=label_dimension, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predictions = np.array([ x[prediction_keys.PredictionKeys.PREDICTIONS] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, label_dimension), predictions.shape) # EXPORT feature_spec = feature_column.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': data}, y=data, batch_size=batch_size, shuffle=False) predict_input_fn = numpy_io.numpy_input_fn( x={'x': data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) def test_pandas_input_fn(self): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return label_dimension = 1 batch_size = 10 data = np.linspace(0., 2., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(data) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" label_dimension = 2 batch_size = 10 data = np.linspace(0., 2., batch_size * 
label_dimension, dtype=np.float32) data = data.reshape(batch_size, label_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), 'y': feature_pb2.Feature( float_list=feature_pb2.FloatList(value=datum)), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=label_dimension, label_dimension=label_dimension, batch_size=batch_size) # A function to mimic dnn-classifier init reuse same tests. def _dnn_classifier_fn(hidden_units, feature_columns, model_dir=None, n_classes=2, weight_column=None, label_vocabulary=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedClassifier( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, n_classes=n_classes, weight_column=weight_column, label_vocabulary=label_vocabulary, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyClassifierEvaluateTest( dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__( self, _dnn_classifier_fn) class DNNOnlyClassifierPredictTest( dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierPredictTest.__init__( self, _dnn_classifier_fn) class DNNOnlyClassifierTrainTest( dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNClassifierTrainTest.__init__( self, _dnn_classifier_fn) # A function to mimic dnn-regressor init reuse same tests. 
def _dnn_regressor_fn(hidden_units, feature_columns, model_dir=None, label_dimension=1, weight_column=None, optimizer='Adagrad', config=None, input_layer_partitioner=None): return dnn_linear_combined.DNNLinearCombinedRegressor( model_dir=model_dir, dnn_hidden_units=hidden_units, dnn_feature_columns=feature_columns, dnn_optimizer=optimizer, label_dimension=label_dimension, weight_column=weight_column, input_layer_partitioner=input_layer_partitioner, config=config) class DNNOnlyRegressorEvaluateTest( dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__( self, _dnn_regressor_fn) class DNNOnlyRegressorPredictTest( dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorPredictTest.__init__( self, _dnn_regressor_fn) class DNNOnlyRegressorTrainTest( dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase): def __init__(self, methodName='runTest'): # pylint: disable=invalid-name test.TestCase.__init__(self, methodName) dnn_testing_utils.BaseDNNRegressorTrainTest.__init__( self, _dnn_regressor_fn) class DNNLinearCombinedClassifierIntegrationTest(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: writer_cache.FileWriterCache.clear() shutil.rmtree(self._model_dir) def _as_label(self, data_in_float): return np.rint(data_in_float).astype(np.int64) def _test_complete_flow( self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension, n_classes, batch_size): linear_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] dnn_feature_columns = [ feature_column.numeric_column('x', shape=(input_dimension,))] feature_columns = linear_feature_columns + dnn_feature_columns est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=linear_feature_columns, dnn_hidden_units=(2, 2), dnn_feature_columns=dnn_feature_columns, n_classes=n_classes, model_dir=self._model_dir) # TRAIN num_steps = 10 est.train(train_input_fn, steps=num_steps) # EVALUTE scores = est.evaluate(eval_input_fn) self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP]) self.assertIn('loss', six.iterkeys(scores)) # PREDICT predicted_proba = np.array([ x[prediction_keys.PredictionKeys.PROBABILITIES] for x in est.predict(predict_input_fn) ]) self.assertAllEqual((batch_size, n_classes), predicted_proba.shape) # EXPORT feature_spec = feature_column.make_parse_example_spec(feature_columns) serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn( feature_spec) export_dir = est.export_savedmodel(tempfile.mkdtemp(), serving_input_receiver_fn) self.assertTrue(gfile.Exists(export_dir)) def test_numpy_input_fn(self): """Tests complete flow with numpy_input_fn.""" n_classes = 3 input_dimension = 2 batch_size = 10 data = np.linspace( 0., n_classes - 1., batch_size * input_dimension, dtype=np.float32) x_data = data.reshape(batch_size, input_dimension) y_data = self._as_label(np.reshape(data[:batch_size], (batch_size, 1))) # learn y = x train_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = numpy_io.numpy_input_fn( x={'x': x_data}, y=y_data, batch_size=batch_size, shuffle=False) predict_input_fn = 
numpy_io.numpy_input_fn( x={'x': x_data}, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) def test_pandas_input_fn(self): """Tests complete flow with pandas_input_fn.""" if not HAS_PANDAS: return input_dimension = 1 n_classes = 2 batch_size = 10 data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32) x = pd.DataFrame({'x': data}) y = pd.Series(self._as_label(data)) train_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True) eval_input_fn = pandas_io.pandas_input_fn( x=x, y=y, batch_size=batch_size, shuffle=False) predict_input_fn = pandas_io.pandas_input_fn( x=x, batch_size=batch_size, shuffle=False) self._test_complete_flow( train_input_fn=train_input_fn, eval_input_fn=eval_input_fn, predict_input_fn=predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) def test_input_fn_from_parse_example(self): """Tests complete flow with input_fn constructed from parse_example.""" input_dimension = 2 n_classes = 3 batch_size = 10 data = np.linspace(0., n_classes-1., batch_size * input_dimension, dtype=np.float32) data = data.reshape(batch_size, input_dimension) serialized_examples = [] for datum in data: example = example_pb2.Example(features=feature_pb2.Features( feature={ 'x': feature_pb2.Feature(float_list=feature_pb2.FloatList( value=datum)), 'y': feature_pb2.Feature(int64_list=feature_pb2.Int64List( value=self._as_label(datum[:1]))), })) serialized_examples.append(example.SerializeToString()) feature_spec = { 'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32), 'y': parsing_ops.FixedLenFeature([1], dtypes.int64), } def _train_input_fn(): feature_map = parsing_ops.parse_example(serialized_examples, feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _eval_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) labels = features.pop('y') return features, labels def _predict_input_fn(): feature_map = parsing_ops.parse_example( input_lib.limit_epochs(serialized_examples, num_epochs=1), feature_spec) features = linear_testing_utils.queue_parsed_features(feature_map) features.pop('y') return features, None self._test_complete_flow( train_input_fn=_train_input_fn, eval_input_fn=_eval_input_fn, predict_input_fn=_predict_input_fn, input_dimension=input_dimension, n_classes=n_classes, batch_size=batch_size) class DNNLinearCombinedTests(test.TestCase): def setUp(self): self._model_dir = tempfile.mkdtemp() def tearDown(self): if self._model_dir: shutil.rmtree(self._model_dir) def _mock_optimizer(self, real_optimizer, var_name_prefix): """Verifies global_step is None and var_names start with given prefix.""" def _minimize(loss, global_step=None, var_list=None): self.assertIsNone(global_step) trainable_vars = var_list or ops.get_collection( ops.GraphKeys.TRAINABLE_VARIABLES) var_names = [var.name for var in trainable_vars] self.assertTrue( all([name.startswith(var_name_prefix) for name in var_names])) # var is used to check this op called by training. 
var = variables_lib.Variable(0., name=(var_name_prefix + '_called')) with ops.control_dependencies([var.assign(100.)]): return real_optimizer.minimize(loss, global_step, var_list) optimizer_mock = test.mock.NonCallableMagicMock( spec=optimizer_lib.Optimizer, wraps=real_optimizer) optimizer_mock.minimize = test.mock.MagicMock(wraps=_minimize) return optimizer_mock def test_train_op_calls_both_dnn_and_linear(self): opt = gradient_descent.GradientDescentOptimizer(1.) x_column = feature_column.numeric_column('x') input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[0.], [1.]])}, y=np.array([[0.], [1.]]), batch_size=1, shuffle=False) est = dnn_linear_combined.DNNLinearCombinedClassifier( linear_feature_columns=[x_column], # verifies linear_optimizer is used only for linear part. linear_optimizer=self._mock_optimizer(opt, 'linear'), dnn_hidden_units=(2, 2), dnn_feature_columns=[x_column], # verifies dnn_optimizer is used only for linear part. dnn_optimizer=self._mock_optimizer(opt, 'dnn'), model_dir=self._model_dir) est.train(input_fn, steps=1) # verifies train_op fires linear minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'binary_logistic_head/linear_called')) # verifies train_op fires dnn minimize op self.assertEqual(100., checkpoint_utils.load_variable( self._model_dir, 'binary_logistic_head/dnn_called')) def test_dnn_and_linear_logits_are_added(self): with ops.Graph().as_default(): variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights') variables_lib.Variable([2.0], name='linear/linear_model/bias_weights') variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel') variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias') variables_lib.Variable([[5.0]], name='dnn/logits/kernel') variables_lib.Variable([6.0], name='dnn/logits/bias') variables_lib.Variable(1, name='global_step', dtype=dtypes.int64) linear_testing_utils.save_variables_to_ckpt(self._model_dir) x_column = feature_column.numeric_column('x') est = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[x_column], dnn_hidden_units=[1], dnn_feature_columns=[x_column], model_dir=self._model_dir) input_fn = numpy_io.numpy_input_fn( x={'x': np.array([[10.]])}, batch_size=1, shuffle=False) # linear logits = 10*1 + 2 = 12 # dnn logits = (10*3 + 4)*5 + 6 = 176 # logits = dnn + linear = 176 + 12 = 188 self.assertAllClose( { prediction_keys.PredictionKeys.PREDICTIONS: [188.], }, next(est.predict(input_fn=input_fn))) if __name__ == '__main__': test.main()
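# ----------------------------------------------------------------------------
# Editorial note -- not part of the TensorFlow test suite.  A hedged sketch of
# the "wide & deep" construction these tests exercise, shown outside the test
# harness; the column name, shapes and hidden-unit sizes are illustrative
# assumptions.
#
#     feature_columns = [feature_column.numeric_column('x', shape=(2,))]
#     estimator = dnn_linear_combined.DNNLinearCombinedClassifier(
#         linear_feature_columns=feature_columns,   # "wide" (linear) part
#         dnn_feature_columns=feature_columns,      # "deep" (DNN) part
#         dnn_hidden_units=(2, 2),
#         n_classes=3)
#     # estimator.train(train_input_fn, steps=10)
#     # estimator.evaluate(eval_input_fn)
#     # predictions = list(estimator.predict(predict_input_fn))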
apache-2.0
klahnakoski/ActiveData
vendor/mo_testing/fuzzytestcase.py
1
9712
# encoding: utf-8 # # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. # # Contact: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import unicode_literals import datetime import types import unittest from mo_collections.unique_index import UniqueIndex import mo_dots from mo_dots import coalesce, is_container, is_list, literal_field, unwrap, to_data, is_data, is_many from mo_future import is_text, zip_longest, first from mo_logs import Except, Log, suppress_exception from mo_logs.strings import expand_template, quote import mo_math from mo_math import is_number, log10 from mo_times import dates class FuzzyTestCase(unittest.TestCase): """ COMPARE STRUCTURE AND NUMBERS! ONLY THE ATTRIBUTES IN THE expected STRUCTURE ARE TESTED TO EXIST EXTRA ATTRIBUTES ARE IGNORED. NUMBERS ARE MATCHED BY ... * places (UP TO GIVEN SIGNIFICANT DIGITS) * digits (UP TO GIVEN DECIMAL PLACES, WITH NEGATIVE MEANING LEFT-OF-UNITS) * delta (MAXIMUM ABSOLUTE DIFFERENCE FROM expected) """ def __init__(self, *args, **kwargs): unittest.TestCase.__init__(self, *args, **kwargs) self.default_places=15 def set_default_places(self, places): """ WHEN COMPARING float, HOW MANY DIGITS ARE SIGNIFICANT BY DEFAULT """ self.default_places=places def assertAlmostEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None): if delta or digits: assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta) else: assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=coalesce(places, self.default_places), delta=delta) def assertEqual(self, test_value, expected, msg=None, digits=None, places=None, delta=None): self.assertAlmostEqual(test_value, expected, msg=msg, digits=digits, places=places, delta=delta) def assertRaises(self, problem=None, function=None, *args, **kwargs): if function is None: return RaiseContext(self, problem=problem or Exception) with RaiseContext(self, problem=problem): function(*args, **kwargs) class RaiseContext(object): def __init__(self, this, problem=Exception): self.this = this self.problem = problem def __enter__(self): pass def __exit__(self, exc_type, exc_val, exc_tb): if not exc_val: Log.error("Expecting an error") f = Except.wrap(exc_val) if isinstance(self.problem, (list, tuple)): problems = self.problem else: problems = [self.problem] causes = [] for problem in problems: if isinstance(problem, object.__class__) and issubclass(problem, BaseException) and isinstance(exc_val, problem): return True try: self.this.assertIn(problem, f) return True except Exception as cause: causes.append(cause) Log.error("problem is not raised", cause=first(causes)) def assertAlmostEqual(test, expected, digits=None, places=None, msg=None, delta=None): show_detail = True test = unwrap(test) expected = unwrap(expected) try: if test is None and (is_null_op(expected) or expected is None): return elif test is expected: return elif is_text(expected): assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta) elif isinstance(test, UniqueIndex): if test ^ expected: Log.error("Sets do not match") elif is_data(expected) and is_data(test): for k, e in unwrap(expected).items(): t = test.get(k) assertAlmostEqual(t, e, msg=coalesce(msg, "")+"key "+quote(k)+": ", digits=digits, places=places, delta=delta) elif is_data(expected): if is_many(test): test = list(test) if len(test) != 1: 
Log.error("Expecting data, not a list") test = test[0] for k, e in expected.items(): try: t = test[k] assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta) continue except: pass t = mo_dots.get_attr(test, literal_field(k)) assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta) elif is_container(test) and isinstance(expected, set): test = set(to_data(t) for t in test) if len(test) != len(expected): Log.error( "Sets do not match, element count different:\n{{test|json|indent}}\nexpecting{{expectedtest|json|indent}}", test=test, expected=expected ) try: return len(set(test)|expected) == len(expected) except: for e in expected: for t in test: try: assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta) break except Exception as _: pass else: Log.error("Sets do not match. {{value|json}} not found in {{test|json}}", value=e, test=test) elif isinstance(expected, types.FunctionType): return expected(test) elif hasattr(test, "__iter__") and hasattr(expected, "__iter__"): if test.__class__.__name__ == "ndarray": # numpy test = test.tolist() elif test.__class__.__name__ == "DataFrame": # pandas test = test[test.columns[0]].values.tolist() elif test.__class__.__name__ == "Series": # pandas test = test.values.tolist() if not expected and test == None: return if expected == None: expected = [] # REPRESENT NOTHING for t, e in zip_longest(test, expected): assertAlmostEqual(t, e, msg=msg, digits=digits, places=places, delta=delta) else: assertAlmostEqualValue(test, expected, msg=msg, digits=digits, places=places, delta=delta) except Exception as cause: Log.error( "{{test|json|limit(10000)}} does not match expected {{expected|json|limit(10000)}}", test=test if show_detail else "[can not show]", expected=expected if show_detail else "[can not show]", cause=cause ) def assertAlmostEqualValue(test, expected, digits=None, places=None, msg=None, delta=None): """ Snagged from unittest/case.py, then modified (Aug2014) """ if is_null_op(expected): if test == None: # pandas dataframes reject any comparision with an exception! 
return else: raise AssertionError(expand_template("{{test|json}} != NULL", locals())) if expected == None: # None has no expectations return if test == expected: # shortcut return if isinstance(expected, (dates.Date, datetime.datetime, datetime.date)): return assertAlmostEqualValue( dates.Date(test).unix, dates.Date(expected).unix, msg=msg, digits=digits, places=places, delta=delta ) if not is_number(expected): # SOME SPECIAL CASES, EXPECTING EMPTY CONTAINERS IS THE SAME AS EXPECTING NULL if is_list(expected) and len(expected) == 0 and test == None: return if is_data(expected) and not expected.keys() and test == None: return if test != expected: raise AssertionError(expand_template("{{test|json}} != {{expected|json}}", locals())) return elif not is_number(test): try: # ASSUME IT IS A UTC DATE test = dates.parse(test).unix except Exception as e: raise AssertionError(expand_template("{{test|json}} != {{expected}}", locals())) num_param = 0 if digits != None: num_param += 1 if places != None: num_param += 1 if delta != None: num_param += 1 if num_param > 1: raise TypeError("specify only one of digits, places or delta") if digits is not None: with suppress_exception: diff = log10(abs(test-expected)) if diff < digits: return standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{digits}} decimal places", locals()) elif delta is not None: if abs(test - expected) <= delta: return standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{delta}} delta", locals()) else: if places is None: places = 15 with suppress_exception: diff = mo_math.log10(abs(test-expected)) if diff == None: return # Exactly the same if diff < mo_math.ceiling(mo_math.log10(abs(test)))-places: return standardMsg = expand_template("{{test|json}} != {{expected|json}} within {{places}} places", locals()) raise AssertionError(coalesce(msg, "") + ": (" + standardMsg + ")") def is_null_op(v): return v.__class__.__name__ == "NullOp"
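# ----------------------------------------------------------------------------
# Editorial usage sketch -- not part of the original module.  It shows the two
# behaviours implemented above: only the keys present in `expected` are
# checked, and numbers are matched to a requested precision.  The concrete
# values are illustrative assumptions.
def _example_fuzzy_match():
    observed = {"a": 3.14159265, "extra": "ignored"}
    # Passes: the extra key is absent from `expected`, and 3.14159265 agrees
    # with 3.1416 to 4 significant digits.
    assertAlmostEqual(observed, {"a": 3.1416}, places=4)
    # Would raise: the absolute difference exceeds the requested delta.
    # assertAlmostEqual(observed, {"a": 3.0}, delta=0.01)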
mpl-2.0
sumspr/scikit-learn
sklearn/metrics/cluster/bicluster.py
359
2797
from __future__ import division import numpy as np from sklearn.utils.linear_assignment_ import linear_assignment from sklearn.utils.validation import check_consistent_length, check_array __all__ = ["consensus_score"] def _check_rows_and_columns(a, b): """Unpacks the row and column arrays and checks their shape.""" check_consistent_length(*a) check_consistent_length(*b) checks = lambda x: check_array(x, ensure_2d=False) a_rows, a_cols = map(checks, a) b_rows, b_cols = map(checks, b) return a_rows, a_cols, b_rows, b_cols def _jaccard(a_rows, a_cols, b_rows, b_cols): """Jaccard coefficient on the elements of the two biclusters.""" intersection = ((a_rows * b_rows).sum() * (a_cols * b_cols).sum()) a_size = a_rows.sum() * a_cols.sum() b_size = b_rows.sum() * b_cols.sum() return intersection / (a_size + b_size - intersection) def _pairwise_similarity(a, b, similarity): """Computes pairwise similarity matrix. result[i, j] is the Jaccard coefficient of a's bicluster i and b's bicluster j. """ a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b) n_a = a_rows.shape[0] n_b = b_rows.shape[0] result = np.array(list(list(similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j]) for j in range(n_b)) for i in range(n_a))) return result def consensus_score(a, b, similarity="jaccard"): """The similarity of two sets of biclusters. Similarity between individual biclusters is computed. Then the best matching between sets is found using the Hungarian algorithm. The final score is the sum of similarities divided by the size of the larger set. Read more in the :ref:`User Guide <biclustering>`. Parameters ---------- a : (rows, columns) Tuple of row and column indicators for a set of biclusters. b : (rows, columns) Another set of biclusters like ``a``. similarity : string or function, optional, default: "jaccard" May be the string "jaccard" to use the Jaccard coefficient, or any function that takes four arguments, each of which is a 1d indicator vector: (a_rows, a_columns, b_rows, b_columns). References ---------- * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis for bicluster acquisition <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__. """ if similarity == "jaccard": similarity = _jaccard matrix = _pairwise_similarity(a, b, similarity) indices = linear_assignment(1. - matrix) n_a = len(a[0]) n_b = len(b[0]) return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
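# ----------------------------------------------------------------------------
# Editorial usage sketch -- not part of the scikit-learn source.  Each set of
# biclusters is a (rows, columns) pair of boolean indicator arrays with one
# row per bicluster, as described in the docstring above; the values are
# illustrative assumptions.
def _example_consensus_score():
    a = (np.array([[True, True, False, False]]),   # rows of bicluster 0
         np.array([[True, False, True]]))          # columns of bicluster 0
    b = (np.array([[True, True, True, False]]),
         np.array([[True, False, True]]))
    # Identical column sets but only partially overlapping row sets, so the
    # Jaccard similarity -- and hence the consensus score -- is below 1.
    return consensus_score(a, b)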
bsd-3-clause
sunshinelover/chanlun
vn.trader/ctaAlgo/uiChanlunWidget.py
1
68647
# encoding: UTF-8 """ 缠论模块相关的GUI控制组件 """ from vtGateway import VtSubscribeReq from uiBasicWidget import QtGui, QtCore, BasicCell,BasicMonitor,TradingWidget from eventEngine import * from ctaBase import * import pyqtgraph as pg import numpy as np import pymongo from pymongo.errors import * from datetime import datetime, timedelta from ctaHistoryData import HistoryDataEngine import time import types import pandas as pd ######################################################################## class MyStringAxis(pg.AxisItem): def __init__(self, xdict, *args, **kwargs): pg.AxisItem.__init__(self, *args, **kwargs) self.x_values = np.asarray(xdict.keys()) self.x_strings = xdict.values() def tickStrings(self, values, scale, spacing): strings = [] for v in values: # vs is the original tick value vs = v * scale # if we have vs in our values, show the string # otherwise show nothing if vs in self.x_values: # Find the string with x_values closest to vs vstr = self.x_strings[np.abs(self.x_values - vs).argmin()] else: vstr = "" strings.append(vstr) return strings ######################################################################## class ChanlunEngineManager(QtGui.QWidget): """chanlun引擎管理组件""" signal = QtCore.pyqtSignal(type(Event())) # ---------------------------------------------------------------------- def __init__(self, chanlunEngine, eventEngine, mainEngine, parent=None): """Constructor""" super(ChanlunEngineManager, self).__init__(parent) self.chanlunEngine = chanlunEngine self.eventEngine = eventEngine self.mainEngine = mainEngine self.penLoaded = False self.segmentLoaded = False self.tickLoaded = False self.zhongShuLoaded = False self.instrumentid = '' self.initUi() self.registerEvent() # 记录日志 self.chanlunEngine.writeChanlunLog(u'缠论引擎启动成功') # ---------------------------------------------------------------------- def initUi(self): """初始化界面""" self.setWindowTitle(u'缠论策略') # 期货代码输入框 self.codeEdit = QtGui.QLineEdit() self.codeEdit.setPlaceholderText(u'在此输入期货代码') self.codeEdit.setMaximumWidth(200) self.data = pd.DataFrame() #画图所需数据, 重要 self.fenX = [] #分笔分段所需X轴坐标 self.fenY = [] #分笔分段所需Y轴坐标 self.zhongshuPos = [] #中枢的位置 self.zhongShuType = [] #中枢的方向 # 金融图 self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.TickW = None # MongoDB数据库相关 self.__mongoConnected = False self.__mongoConnection = None # 调用函数 self.__connectMongo() # 按钮 penButton = QtGui.QPushButton(u'分笔') segmentButton = QtGui.QPushButton(u'分段') zhongshuButton = QtGui.QPushButton(u'走势中枢') shopButton = QtGui.QPushButton(u'买卖点') restoreButton = QtGui.QPushButton(u'还原') penButton.clicked.connect(self.pen) segmentButton.clicked.connect(self.segment) zhongshuButton.clicked.connect(self.zhongShu) shopButton.clicked.connect(self.shop) restoreButton.clicked.connect(self.restore) # Chanlun组件的日志监控 self.chanlunLogMonitor = QtGui.QTextEdit() self.chanlunLogMonitor.setReadOnly(True) self.chanlunLogMonitor.setMaximumHeight(180) # 设置布局 self.hbox2 = QtGui.QHBoxLayout() self.hbox2.addWidget(self.codeEdit) self.hbox2.addWidget(penButton) self.hbox2.addWidget(segmentButton) self.hbox2.addWidget(zhongshuButton) self.hbox2.addWidget(shopButton) self.hbox2.addWidget(restoreButton) self.hbox2.addStretch() tickButton = QtGui.QPushButton(u'Tick') oneMButton = QtGui.QPushButton(u"1分") fiveMButton = QtGui.QPushButton(u'5分') fifteenMButton = QtGui.QPushButton(u'15分') thirtyMButton = QtGui.QPushButton(u'30分') sixtyMButton = QtGui.QPushButton(u'60分') dayButton = QtGui.QPushButton(u'日') weekButton = QtGui.QPushButton(u'周') monthButton = 
QtGui.QPushButton(u'月') oneMButton.checked = True self.vbox1 = QtGui.QVBoxLayout() tickButton.clicked.connect(self.openTick) oneMButton.clicked.connect(self.oneM) fiveMButton.clicked.connect(self.fiveM) fifteenMButton.clicked.connect(self.fifteenM) thirtyMButton.clicked.connect(self.thirtyM) sixtyMButton.clicked.connect(self.sixtyM) dayButton.clicked.connect(self.daily) weekButton.clicked.connect(self.weekly) monthButton.clicked.connect(self.monthly) self.vbox2 = QtGui.QVBoxLayout() self.vbox1.addWidget(self.PriceW) self.vbox2.addWidget(tickButton) self.vbox2.addWidget(oneMButton) self.vbox2.addWidget(fiveMButton) self.vbox2.addWidget(fifteenMButton) self.vbox2.addWidget(thirtyMButton) self.vbox2.addWidget(sixtyMButton) self.vbox2.addWidget(dayButton) self.vbox2.addWidget(weekButton) self.vbox2.addWidget(monthButton) self.vbox2.addStretch() self.hbox3 = QtGui.QHBoxLayout() self.hbox3.addStretch() self.hbox3.addLayout(self.vbox1) self.hbox3.addLayout(self.vbox2) self.vbox = QtGui.QVBoxLayout() self.vbox.addLayout(self.hbox2) self.vbox.addLayout(self.hbox3) self.vbox.addWidget(self.chanlunLogMonitor) self.setLayout(self.vbox) self.codeEdit.returnPressed.connect(self.updateSymbol) #----------------------------------------------------------------------- #从通联数据端获取历史数据 def downloadData(self, symbol, unit): listBar = [] #K线数据 num = 0 #从通联客户端获取K线数据 historyDataEngine = HistoryDataEngine() # unit为int型获取分钟数据,为String类型获取日周月K线数据 if type(unit) is types.IntType: #从通联数据端获取当日分钟数据并存入数据库 historyDataEngine.downloadFuturesIntradayBar(symbol, unit) # 从数据库获取前几天的分钟数据 cx = self.getDbData(symbol, unit) if cx: for data in cx: barOpen = data['open'] barClose = data['close'] barLow = data['low'] barHigh = data['high'] barTime = data['datetime'] listBar.append((num, barTime, barOpen, barClose, barLow, barHigh)) num += 1 elif type(unit) is types.StringType: data = historyDataEngine.downloadFuturesBar(symbol, unit) if data: for d in data: barOpen = d.get('openPrice', 0) barClose = d.get('closePrice', 0) barLow = d.get('lowestPrice', 0) barHigh = d.get('highestPrice', 0) if unit == "daily": barTime = d.get('tradeDate', '').replace('-', '') else: barTime = d.get('endDate', '').replace('-', '') listBar.append((num, barTime, barOpen, barClose, barLow, barHigh)) num += 1 if unit == "monthly" or unit == "weekly": listBar.reverse() else: print "参数格式错误" return #将List数据转换成dataFormat类型,方便处理 df = pd.DataFrame(listBar, columns=['num', 'time', 'open', 'close', 'low', 'high']) df.index = df['time'].tolist() df = df.drop('time', 1) return df #----------------------------------------------------------------------- #从数据库获取前两天的分钟数据 def getDbData(self, symbol, unit): #周六周日不交易,无分钟数据 # 给数据库命名 dbname = '' days = 7 if unit == 1: dbname = MINUTE_DB_NAME elif unit == 5: dbname = MINUTE5_DB_NAME elif unit == 15: dbname = MINUTE15_DB_NAME elif unit == 30: dbname = MINUTE30_DB_NAME elif unit == 60: dbname = MINUTE60_DB_NAME weekday = datetime.now().weekday() # weekday() 返回的是0-6是星期一到星期日 if days == 2: if weekday == 6: aDay = timedelta(days=3) elif weekday == 0 or weekday == 1: aDay = timedelta(days=4) else: aDay = timedelta(days=2) else: aDay = timedelta(days=7) startDate = (datetime.now() - aDay).strftime('%Y%m%d') print startDate if self.__mongoConnected: collection = self.__mongoConnection[dbname][symbol] cx = collection.find({'date': {'$gte': startDate}}) return cx else: return None #---------------------------------------------------------------------------------- #"""合约变化""" def updateSymbol(self): # 读取组件数据 instrumentid = 
str(self.codeEdit.text()) self.chanlunEngine.writeChanlunLog(u'查询合约%s' % (instrumentid)) # 从通联数据客户端获取当日分钟数据 self.data = self.downloadData(instrumentid, 1) if self.data.empty: self.chanlunEngine.writeChanlunLog(u'合约%s 不存在' % (instrumentid)) else: if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (instrumentid)) self.penLoaded = False self.segmentLoaded = False self.tickLoaded = False self.zhongShuLoaded = False # # 订阅合约[仿照ctaEngine.py写的] # # 先取消订阅之前的合约,再订阅最新输入的合约 # contract = self.mainEngine.getContract(self.instrumentid) # if contract: # req = VtSubscribeReq() # req.symbol = contract.symbol # self.mainEngine.unsubscribe(req, contract.gatewayName) # # contract = self.mainEngine.getContract(instrumentid) # if contract: # req = VtSubscribeReq() # req.symbol = contract.symbol # self.mainEngine.subscribe(req, contract.gatewayName) # else: # self.chanlunEngine.writeChanlunLog(u'交易合约%s无法找到' % (instrumentid)) # # # 重新注册事件监听 # self.eventEngine.unregister(EVENT_TICK + self.instrumentid, self.signal.emit) # self.eventEngine.register(EVENT_TICK + instrumentid, self.signal.emit) # 更新目前的合约 self.instrumentid = instrumentid def oneM(self): "打开1分钟K线图" self.chanlunEngine.writeChanlunLog(u'打开合约%s 1分钟K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, 1) if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def fiveM(self): "打开5分钟K线图" self.chanlunEngine.writeChanlunLog(u'打开合约%s 5分钟K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, 5) if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def fifteenM(self): "打开15分钟K线图" self.chanlunEngine.writeChanlunLog(u'打开合约%s 15分钟K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, 15) if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def thirtyM(self): "打开30分钟K线图" self.chanlunEngine.writeChanlunLog(u'打开合约%s 30分钟K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, 30) if 
self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def sixtyM(self): "打开60分钟K线图" self.chanlunEngine.writeChanlunLog(u'打开合约%s 60分钟K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, 60) if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def daily(self): """打开日K线图""" self.chanlunEngine.writeChanlunLog(u'打开合约%s 日K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, "daily") if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def weekly(self): """打开周K线图""" self.chanlunEngine.writeChanlunLog(u'打开合约%s 周K线图' % (self.instrumentid)) # 从通联数据客户端获取数据 self.data = self.downloadData(self.instrumentid, "weekly") if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False def monthly(self): """打开月K线图""" self.chanlunEngine.writeChanlunLog(u'打开合约%s 月K线图' % (self.instrumentid)) # 从通联数据客户端获取数据并画图 self.data = self.downloadData(self.instrumentid, "monthly") if self.tickLoaded: self.vbox1.removeWidget(self.TickW) self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.tickLoaded = False self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def openTick(self): """切换成tick图""" self.chanlunEngine.writeChanlunLog(u'打开tick图') self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.TickW = TickWidget(self.eventEngine, self.chanlunEngine) self.vbox1.addWidget(self.TickW) self.tickLoaded = True self.penLoaded = False self.segmentLoaded = False self.zhongShuLoaded = False # ---------------------------------------------------------------------- def restore(self): """还原初始k线状态""" self.chanlunEngine.writeChanlunLog(u'还原加载成功') if self.tickLoaded: self.vbox1.removeWidget(self.TickW) 
self.TickW.deleteLater() else: self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.data = self.downloadData(self.instrumentid, 1) self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, self.data, self) self.vbox1.addWidget(self.PriceW) # 画K线图 self.PriceW.plotHistorticData() self.chanlunEngine.writeChanlunLog(u'还原为1分钟k线图') self.penLoaded = False self.segmentLoaded = False self.tickLoaded = False # ---------------------------------------------------------------------- def pen(self): """加载分笔""" # 先合并K线数据,记录新建PriceW之前合并K线的数据 if not self.penLoaded: after_fenxing = self.judgeInclude() #判断self.data中K线数据的包含关系 # 清空画布时先remove已有的Widget再新建 self.vbox1.removeWidget(self.PriceW) self.PriceW.deleteLater() self.PriceW = PriceWidget(self.eventEngine, self.chanlunEngine, after_fenxing) self.vbox1.addWidget(self.PriceW) #使用合并K线的数据重新画K线图 self.plotAfterFenXing(after_fenxing) # 找出顶和底 fenxing_data, fenxing_type = self.findTopAndLow(after_fenxing) arrayFenxingdata = np.array(fenxing_data) arrayTypedata = np.array(fenxing_type) self.fenY = [] self.fenX = [m[0] for m in arrayFenxingdata] fenbiY1 = [m[4] for m in arrayFenxingdata] # 顶分型标志最高价 fenbiY2 = [m[3] for m in arrayFenxingdata] # 底分型标志最低价 for i in xrange(len(self.fenX)): if arrayTypedata[i] == 1: self.fenY.append(fenbiY1[i]) else: self.fenY.append(fenbiY2[i]) if not self.penLoaded: if self.fenX: self.fenX.append(self.fenX[-1]) self.fenY.append(self.fenY[-1]) print "self.fenX: ", self.fenX print "self.fenY: ", self.fenY self.fenbi(self.fenX, self.fenY) self.fenX.pop() self.fenY.pop() self.chanlunEngine.writeChanlunLog(u'分笔加载成功') self.penLoaded = True # ---------------------------------------------------------------------- def segment(self): if not self.penLoaded: self.pen() #先分笔才能分段 segmentX = [] #分段点X轴值 segmentY = [] #分段点Y轴值 temp_type = 0 #标志线段方向,向上为1,向下为-1, 未判断前三笔是否重合为0 i = 0 while i < len(self.fenX) - 4: if temp_type == 0: if self.fenY[i] > self.fenY[i+1] and self.fenY[i] > self.fenY[i+3]: temp_type = -1 #向下线段,三笔重合 segmentX.append(self.fenX[i]) segmentY.append(self.fenY[i]) elif self.fenY[i] < self.fenY[i+1] and self.fenY[i] < self.fenY[i+3]: temp_type = 1 #向上线段,三笔重合 segmentX.append(self.fenX[i]) segmentY.append(self.fenY[i]) else: temp_type = 0 i += 1 continue if temp_type == 1: #向上线段 j = i+1 high = [] # 记录顶 low = [] # 记录低 while j < len(self.fenX) - 1: #记录顶底 high.append(self.fenY[j]) low.append(self.fenY[j+1]) j += 2 if self.fenY[i+4] < self.fenY[i+1]: #向上线段被向下笔破坏 j = 0 while j < len(high)-2: # 顶底出现顶分型,向上线段结束 if high[j+1] > high[j] and high[j+1] > high[j+2]: num = i + 2 * j + 3 #线段结束点位置 segmentX.append(self.fenX[num]) segmentY.append(self.fenY[num]) i = num temp_type = -1 #向上线段一定由向下线段结束 break j += 1 if j == len(high)-2: break else: #向上线段未被向下笔破坏 j = 1 while j < len(high)-2: # 顶底出现底分型,向上线段结束 if low[j + 1] < low[j] and low[j + 1] < low[j + 2]: num = i + 2 * j + 1 # 线段结束点位置 segmentX.append(self.fenX[num]) segmentY.append(self.fenY[num]) i = num temp_type = -1 # 向上线段一定由向下线段结束 break j += 1 if j == len(high)-2: break elif temp_type == -1: # 向下线段 j = i + 1 high = [] # 记录顶 low = [] # 记录低 while j < len(self.fenX) - 1: # 记录顶底 high.append(self.fenY[j + 1]) low.append(self.fenY[j]) j += 2 if self.fenY[i + 4] > self.fenY[i + 1]: # 向下线段被向上笔破坏 j = 0 while j < len(high) - 2: # 顶底出现底分型,向下线段结束 if low[j + 1] < low[j] and low[j + 1] < low[j + 2]: num = i + 2 * j + 3 # 线段结束点位置 segmentX.append(self.fenX[num]) segmentY.append(self.fenY[num]) i = num temp_type = 1 # 向下线段一定由向上线段结束 break j += 1 if j == len(high) - 2: break else: # 向下线段未被向上笔破坏 j = 1 while j < 
len(high) - 2: # 顶底出现顶分型,向下线段结束 if high[j + 1] > high[j] and high[j + 1] > high[j + 2]: num = i + 2 * j + 1 # 线段结束点位置 segmentX.append(self.fenX[num]) segmentY.append(self.fenY[num]) i = num temp_type = 1 # 向下线段一定由向上线段结束 break j += 1 if j == len(high) - 2: break print "segmentX: ", segmentX print "segmentY: ", segmentY if not self.segmentLoaded: if len(segmentX) > 1: segmentX.append(segmentX[-1]) segmentY.append(segmentY[-1]) segmentX = [int(x) for x in segmentX] segmentY = [int(y) for y in segmentY] self.fenduan(segmentX, segmentY) self.chanlunEngine.writeChanlunLog(u'分段加载成功') self.segmentLoaded = True # ---------------------------------------------------------------------- def updateChanlunLog(self, event): """更新缠论相关日志""" log = event.dict_['data'] # print type(log) if(log.logTime): content = '\t'.join([log.logTime, log.logContent]) self.chanlunLogMonitor.append(content) else: print 0 #----------------------------------------------------------------------- def zhongShu(self): if not self.penLoaded: self.pen() # 先分笔才能画走势中枢 # temp_type = 0 # 标志中枢方向,向上为1,向下为-1 i = 0 temp_high, temp_low = 0, 0 minX, maxY = 0, 0 self.zhongshuPos = [] # 记录所有的中枢开始段和结束段的位置 self.zhongShuType = [] #记录所有中枢的方向 while i < len(self.fenX) - 4: if (self.fenY[i] > self.fenY[i + 1] and self.fenY[i + 1] < self.fenY[i + 4]): #判断进入段方向 temp_low = max(self.fenY[i + 1], self.fenY[i + 3]) temp_high = min(self.fenY[i + 2], self.fenY[i + 4]) #记录中枢内顶的最小值与底的最大值 minX = self.fenX[i+1] self.zhongshuPos.append(i) self.zhongShuType.append(-1) j = i while i < len(self.fenX) - 4: j = i if self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high : maxX = self.fenX[i+4] if self.fenY[i + 3] > temp_low: temp_low = self.fenY[i + 3] if self.fenY[i + 4] < temp_high: temp_high = self.fenY[i + 4] i = i + 1 elif self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low : maxX = self.fenX[i + 4] if self.fenY[i + 3] < temp_high: temp_high = self.fenY[i + 3] if self.fenY[i + 4] > temp_low: temp_low = self.fenY[i + 4] i = i + 1 if j == i: break elif (self.fenY[i] < self.fenY[i + 1] and self.fenY[i + 1] > self.fenY[i + 4]): temp_high = min(self.fenY[i + 1], self.fenY[i + 3]) temp_low = max(self.fenY[i + 2], self.fenY[i + 4]) minX = self.fenX[i + 1] self.zhongshuPos.append(i) self.zhongShuType.append(1) j = i while i < len(self.fenX) - 4: j = i if self.fenY[i + 1] > self.fenY[i + 4] and self.fenY[i + 4] < temp_high and self.fenY[i + 3] > temp_low: maxX = self.fenX[i + 4] if self.fenY[i + 3] < temp_high: temp_high = self.fenY[i + 3] if self.fenY[i + 4] > temp_low: temp_low = self.fenY[i + 4] i = i + 1 elif self.fenY[i + 1] < self.fenY[i + 4] and self.fenY[i + 4] > temp_low and self.fenY[i + 3] < temp_high: maxX = self.fenX[i + 4] if self.fenY[i + 3] > temp_low: temp_low = self.fenY[i + 3] if self.fenY[i + 4] < temp_high: temp_high = self.fenY[i + 4] i = i + 1 if i == j: break else: i += 1 continue # 画出当前判断出的中枢 if minX != 0 and maxX == 0: maxX = self.fenX[i+4] i = i + 1 self.zhongshuPos.append(i + 4) else: self.zhongshuPos.append(i + 3) minY, maxY = temp_low, temp_high print minX, minY, maxX, maxY if int(maxY) > int(minY): plotX = [minX, minX, maxX, maxX, minX] plotY = [minY, maxY, maxY, minY, minY] plotX = [int(x) for x in plotX] plotY = [int(y) for y in plotY] self.zhongshu(plotX, plotY) i = i + 4 self.zhongShuLoaded = True self.chanlunEngine.writeChanlunLog(u'走势中枢加载成功') # ---------------------------------------------------------------------- def shop(self): """加载买卖点""" if 
not self.zhongShuLoaded: self.zhongShu() i = 0 while i < len(self.zhongShuType) - 1: startPos, endPos = self.zhongshuPos[2*i], self.zhongshuPos[2*i + 1] # 中枢开始段的位置和结束段的位置 startY = self.fenY[startPos + 1] - self.fenY[startPos] # 开始段Y轴距离 startX = self.fenX[startPos + 1] - self.fenX[startPos] # 开始段X轴距离 startK = abs(startY * startX) # 开始段投影面积 endY = self.fenY[endPos + 1] - self.fenY[endPos] # 结束段Y轴距离 endX = self.fenX[endPos + 1] - self.fenX[endPos] # 结束段段X轴距离 endK = abs(endY * endX) # 开始段投影面积 if endK < startK: print startPos, endPos if self.zhongShuType[i] == 1 and self.zhongShuType[i + 1] == -1: # 一卖 self.sellpoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1) # 二卖,一卖后一个顶点 self.sellpoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2) # 三卖,一卖之后中枢结束段的第一个顶 i = i + 1 nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置 if nextPos + 1 < len(self.fenY): if self.fenY[nextPos + 1] > self.fenY[nextPos]: self.sellpoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3) else: self.sellpoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3) elif self.zhongShuType[i] == -1 and self.zhongShuType[i + 1] == 1: # 一买 self.buypoint([self.fenX[endPos + 1]], [self.fenY[endPos + 1]], 1) # 二买,一买后一个底点 self.buypoint([self.fenX[endPos + 3]], [self.fenY[endPos + 3]], 2) # 三买,一买之后中枢结束段的第一个顶 i = i + 1 nextPos = self.zhongshuPos[2*i + 1] # 下一个中枢结束位置 if nextPos + 1 < len(self.fenY): if self.fenY[nextPos + 1] < self.fenY[nextPos]: self.buypoint([self.fenX[nextPos + 1]], [self.fenY[nextPos + 1]], 3) else: self.buypoint([self.fenX[nextPos]], [self.fenY[nextPos]], 3) i = i + 1 # 接着判断之后的中枢是否出现背驰 self.chanlunEngine.writeChanlunLog(u'买卖点加载成功') # ---------------------------------------------------------------------- def fenbi(self, fenbix, fenbiy): self.PriceW.pw2.plotItem.plot(x=fenbix, y=fenbiy, pen=QtGui.QPen(QtGui.QColor(255, 236, 139))) def fenduan(self, fenduanx, fenduany): self.PriceW.pw2.plot(x=fenduanx, y=fenduany, symbol='o', pen=QtGui.QPen(QtGui.QColor(131, 111, 255))) def zhongshu(self, zhongshux, zhongshuy): self.PriceW.pw2.plot(x=zhongshux, y=zhongshuy, pen=QtGui.QPen(QtGui.QColor(255,165,0))) def buypoint(self, buyx, buyy, point): if point == 1: self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(255,0,0), symbolPen=(255,0,0), symbol='star') elif point == 2: self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(238,130,238), symbolPen=(238,130,238),symbol='star') elif point == 3: self.PriceW.pw2.plot(x=buyx, y=buyy, symbolSize=18, symbolBrush=(138,43,226), symbolPen=(138,43,226),symbol='star') def sellpoint(self, sellx, selly, point): if point == 1: self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(119,172,48), symbolPen=(119,172,48), symbol='star') elif point == 2: self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(221,221,34), symbolPen=(221,221,34),symbol='star') elif point == 3: self.PriceW.pw2.plot(x=sellx, y=selly, symbolSize=18, symbolBrush=(179,158,77), symbolPen=(179,158,77),symbol='star') # ---------------------------------------------------------------------- # 判断包含关系,仿照聚框,合并K线数据 def judgeInclude(self): ## 判断包含关系 k_data = self.data # 保存分型后dataFrame的值 after_fenxing = pd.DataFrame() temp_data = k_data[:1] zoushi = [3] # 3-持平 4-向下 5-向上 for i in xrange(len(k_data)): case1 = temp_data.high[-1] >= k_data.high[i] and temp_data.low[-1] <= k_data.low[i] # 第1根包含第2根 case2 = temp_data.high[-1] <= k_data.high[i] and temp_data.low[-1] >= k_data.low[i] # 第2根包含第1根 case3 = temp_data.high[-1] == k_data.high[i] and temp_data.low[-1] == k_data.low[i] # 
第1根等于第2根 case4 = temp_data.high[-1] > k_data.high[i] and temp_data.low[-1] > k_data.low[i] # 向下趋势 case5 = temp_data.high[-1] < k_data.high[i] and temp_data.low[-1] < k_data.low[i] # 向上趋势 if case3: zoushi.append(3) continue elif case1: print temp_data if zoushi[-1] == 4: temp_data.ix[0, 4] = k_data.high[i] #向下走取高点的低点 else: temp_data.ix[0, 3] = k_data.low[i] #向上走取低点的高点 elif case2: temp_temp = temp_data[-1:] temp_data = k_data[i:i + 1] if zoushi[-1] == 4: temp_data.ix[0, 4] = temp_temp.high[0] else: temp_data.ix[0, 3] = temp_temp.low[0] elif case4: zoushi.append(4) after_fenxing = pd.concat([after_fenxing, temp_data], axis=0) temp_data = k_data[i:i + 1] elif case5: zoushi.append(5) after_fenxing = pd.concat([after_fenxing, temp_data], axis=0) temp_data = k_data[i:i + 1] return after_fenxing # ---------------------------------------------------------------------- #画出合并后的K线图,分笔 def plotAfterFenXing(self, after_fenxing): #判断包含关系,合并K线 for i in xrange(len(after_fenxing)): #处理k线的最大最小值、开盘收盘价,合并后k线不显示影线。 after_fenxing.iloc[i, 0] = i if after_fenxing.open[i] > after_fenxing.close[i]: after_fenxing.iloc[i, 1] = after_fenxing.high[i] after_fenxing.iloc[i, 2] = after_fenxing.low[i] else: after_fenxing.iloc[i, 1] = after_fenxing.low[i] after_fenxing.iloc[i, 2] = after_fenxing.high[i] self.PriceW.onBarAfterFenXing(i, after_fenxing.index[i], after_fenxing.open[i], after_fenxing.close[i], after_fenxing.low[i], after_fenxing.high[i]) self.PriceW.plotKlineAfterFenXing() print "plotKLine after fenxing" # ---------------------------------------------------------------------- # 找出顶和底 def findTopAndLow(self, after_fenxing): temp_num = 0 # 上一个顶或底的位置 temp_high = 0 # 上一个顶的high值 temp_low = 0 # 上一个底的low值 temp_type = 0 # 上一个记录位置的类型 i = 1 fenxing_type = [] # 记录分型点的类型,1为顶分型,-1为底分型 fenxing_data = pd.DataFrame() # 分型点的DataFrame值 while (i < len(after_fenxing) - 1): case1 = after_fenxing.high[i - 1] < after_fenxing.high[i] and after_fenxing.high[i] > after_fenxing.high[i + 1] # 顶分型 case2 = after_fenxing.low[i - 1] > after_fenxing.low[i] and after_fenxing.low[i] < after_fenxing.low[i + 1] # 底分型 if case1: if temp_type == 1: # 如果上一个分型为顶分型,则进行比较,选取高点更高的分型 if after_fenxing.high[i] <= temp_high: i += 1 else: temp_high = after_fenxing.high[i] temp_num = i temp_type = 1 i += 1 elif temp_type == 2: # 如果上一个分型为底分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型 if temp_low >= after_fenxing.high[i]: # 如果上一个底分型的底比当前顶分型的顶高,则跳过当前顶分型。 i += 1 elif i < temp_num + 4: # 顶和底至少5k线 i += 1 else: fenxing_type.append(-1) fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0) temp_high = after_fenxing.high[i] temp_num = i temp_type = 1 i += 1 else: temp_high = after_fenxing.high[i] temp_num = i temp_type = 1 i += 1 elif case2: if temp_type == 2: # 如果上一个分型为底分型,则进行比较,选取低点更低的分型 if after_fenxing.low[i] >= temp_low: i += 1 else: temp_low = after_fenxing.low[i] temp_num = i temp_type = 2 i += 1 elif temp_type == 1: # 如果上一个分型为顶分型,则记录上一个分型,用当前分型与后面的分型比较,选取同向更极端的分型 if temp_high <= after_fenxing.low[i]: # 如果上一个顶分型的底比当前底分型的底低,则跳过当前底分型。 i += 1 elif i < temp_num + 4: # 顶和底至少5k线 i += 1 else: fenxing_type.append(1) fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0) temp_low = after_fenxing.low[i] temp_num = i temp_type = 2 i += 1 else: temp_low = after_fenxing.low[i] temp_num = i temp_type = 2 i += 1 else: i += 1 # if fenxing_type: # if fenxing_type[-1] == 1 and temp_type == 2: # fenxing_type.append(-1) # fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0) # # if fenxing_type[-1] 
== -1 and temp_type == 1: # fenxing_type.append(1) # fenxing_data = pd.concat([fenxing_data, after_fenxing[temp_num:temp_num + 1]], axis=0) return fenxing_data, fenxing_type # ---------------------------------------------------------------------- # 连接MongoDB数据库 def __connectMongo(self): try: self.__mongoConnection = pymongo.MongoClient("localhost", 27017) self.__mongoConnected = True except ConnectionFailure: pass # ---------------------------------------------------------------------- def registerEvent(self): """注册事件监听""" self.signal.connect(self.updateChanlunLog) self.eventEngine.register(EVENT_CHANLUN_LOG, self.signal.emit) ######################################################################## class PriceWidget(QtGui.QWidget): """用于显示价格走势图""" signal = QtCore.pyqtSignal(type(Event())) symbol = '' class CandlestickItem(pg.GraphicsObject): def __init__(self, data): pg.GraphicsObject.__init__(self) self.data = data ## data must have fields: time, open, close, min, max self.generatePicture() def generatePicture(self): ## pre-computing a QPicture object allows paint() to run much more quickly, ## rather than re-drawing the shapes every time. self.picture = QtGui.QPicture() p = QtGui.QPainter(self.picture) p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2 # w = (self.data[1][0] - self.data[0][0]) / 3. w = 0.2 for (n, t, open, close, min, max) in self.data: p.drawLine(QtCore.QPointF(n, min), QtCore.QPointF(n, max)) if open > close: p.setBrush(pg.mkBrush('g')) else: p.setBrush(pg.mkBrush('r')) p.drawRect(QtCore.QRectF(n-w, open, w*2, close-open)) pg.setConfigOption('leftButtonPan', False) p.end() def paint(self, p, *args): p.drawPicture(0, 0, self.picture) def boundingRect(self): ## boundingRect _must_ indicate the entire area that will be drawn on ## or else we will get artifacts and possibly crashing. 
## (in this case, QPicture does all the work of computing the bouning rect for us) return QtCore.QRectF(self.picture.boundingRect()) #---------------------------------------------------------------------- def __init__(self, eventEngine, chanlunEngine, data, parent=None): """Constructor""" super(PriceWidget, self).__init__(parent) # K线图EMA均线的参数、变量 self.EMAFastAlpha = 0.0167 # 快速EMA的参数,60 self.EMASlowAlpha = 0.0083 # 慢速EMA的参数,120 self.fastEMA = 0 # 快速EMA的数值 self.slowEMA = 0 # 慢速EMA的数值 self.listfastEMA = [] self.listslowEMA = [] # 保存K线数据的列表对象 self.listBar = [] self.listClose = [] self.listHigh = [] self.listLow = [] self.listOpen = [] # 是否完成了历史数据的读取 self.initCompleted = False self.__eventEngine = eventEngine self.__chanlunEngine = chanlunEngine self.data = data #画图所需数据 # MongoDB数据库相关 self.__mongoConnected = False self.__mongoConnection = None # 调用函数 self.__connectMongo() self.initUi() # self.registerEvent() #---------------------------------------------------------------------- def initUi(self): """初始化界面""" self.setWindowTitle(u'Price') self.vbl_1 = QtGui.QHBoxLayout() self.initplotKline() # plotKline初始化 self.setLayout(self.vbl_1) #---------------------------------------------------------------------- def initplotKline(self): """Kline""" s = self.data.index #横坐标值 print "numbers of KLine: ", len(s) xdict = dict(enumerate(s)) self.__axisTime = MyStringAxis(xdict, orientation='bottom') self.pw2 = pg.PlotWidget(axisItems={'bottom': self.__axisTime}) # K线图 pw2x = self.pw2.getAxis('bottom') pw2x.setGrid(150) # 设置默认x轴网格 pw2y = self.pw2.getAxis('left') pw2y.setGrid(150) # 设置默认y轴网格 self.vbl_1.addWidget(self.pw2) self.pw2.setMinimumWidth(1500) self.pw2.setMaximumWidth(1800) self.pw2.setDownsampling(mode='peak') self.pw2.setClipToView(True) self.curve5 = self.pw2.plot() self.curve6 = self.pw2.plot() self.candle = self.CandlestickItem(self.listBar) self.pw2.addItem(self.candle) ## Draw an arrowhead next to the text box # self.arrow = pg.ArrowItem() # self.pw2.addItem(self.arrow) # 从数据库读取一分钟数据画分钟线 def plotMin(self, symbol): self.initCompleted = True cx = self.__mongoMinDB[symbol].find() print cx.count() if cx: for data in cx: self.barOpen = data['open'] self.barClose = data['close'] self.barLow = data['low'] self.barHigh = data['high'] self.barOpenInterest = data['openInterest'] # print self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest self.onBar(self.num, self.barOpen, self.barClose, self.barLow, self.barHigh, self.barOpenInterest) self.num += 1 # 画历史数据K线图 def plotHistorticData(self): self.initCompleted = True for i in xrange(len(self.data)): self.onBar(i, self.data.index[i], self.data.open[i], self.data.close[i], self.data.low[i], self.data.high[i]) self.plotKline() print "plotKLine success" #---------------------------------------------------------------------- def initHistoricalData(self): """初始历史数据""" if self.symbol!='': print "download histrical data:",self.symbol self.initCompleted = True # 读取历史数据完成 td = timedelta(days=1) # 读取3天的历史TICK数据 # if startDate: # cx = self.loadTick(self.symbol, startDate-td) # else: # today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) # cx = self.loadTick(self.symbol, today-td) print cx.count() if cx: for data in cx: tick = Tick(data['symbol']) tick.openPrice = data['lastPrice'] tick.highPrice = data['upperLimit'] tick.lowPrice = data['lowerLimit'] tick.lastPrice = data['lastPrice'] tick.volume = data['volume'] tick.openInterest = data['openInterest'] tick.upperLimit = data['upperLimit'] tick.lowerLimit = 
data['lowerLimit'] tick.time = data['time'] # tick.ms = data['UpdateMillisec'] tick.bidPrice1 = data['bidPrice1'] tick.bidPrice2 = data['bidPrice2'] tick.bidPrice3 = data['bidPrice3'] tick.bidPrice4 = data['bidPrice4'] tick.bidPrice5 = data['bidPrice5'] tick.askPrice1 = data['askPrice1'] tick.askPrice2 = data['askPrice2'] tick.askPrice3 = data['askPrice3'] tick.askPrice4 = data['askPrice4'] tick.askPrice5 = data['askPrice5'] tick.bidVolume1 = data['bidVolume1'] tick.bidVolume2 = data['bidVolume2'] tick.bidVolume3 = data['bidVolume3'] tick.bidVolume4 = data['bidVolume4'] tick.bidVolume5 = data['bidVolume5'] tick.askVolume1 = data['askVolume1'] tick.askVolume2 = data['askVolume2'] tick.askVolume3 = data['askVolume3'] tick.askVolume4 = data['askVolume4'] tick.askVolume5 = data['askVolume5'] self.onTick(tick) print('load historic data completed') #---------------------------------------------------------------------- def plotKline(self): """K线图""" if self.initCompleted: # 均线 self.curve5.setData(self.listfastEMA, pen=(255, 0, 0), name="Red curve") self.curve6.setData(self.listslowEMA, pen=(0, 255, 0), name="Green curve") # 画K线 self.pw2.removeItem(self.candle) self.candle = self.CandlestickItem(self.listBar) self.pw2.addItem(self.candle) #---------------------------------------------------------------------- def plotText(self): lenClose = len(self.listClose) if lenClose >= 5: # Fractal Signal if self.listClose[-1] > self.listClose[-2] and self.listClose[-3] > self.listClose[-2] and self.listClose[-4] > self.listClose[-2] and self.listClose[-5] > self.listClose[-2] and self.listfastEMA[-1] > self.listslowEMA[-1]: ## Draw an arrowhead next to the text box # self.pw2.removeItem(self.arrow) self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listLow[-1]), angle=90, brush=(255, 0, 0))#红色 self.pw2.addItem(self.arrow) elif self.listClose[-1] < self.listClose[-2] and self.listClose[-3] < self.listClose[-2] and self.listClose[-4] < self.listClose[-2] and self.listClose[-5] < self.listClose[-2] and self.listfastEMA[-1] < self.listslowEMA[-1]: ## Draw an arrowhead next to the text box # self.pw2.removeItem(self.arrow) self.arrow = pg.ArrowItem(pos=(lenClose-1, self.listHigh[-1]), angle=-90, brush=(0, 255, 0))#绿色 self.pw2.addItem(self.arrow) #---------------------------------------------------------------------- def onBar(self, n, t, o, c, l, h): self.listBar.append((n, t, o, c, l, h)) self.listOpen.append(o) self.listClose.append(c) self.listHigh.append(h) self.listLow.append(l) #计算K线图EMA均线 if self.fastEMA: self.fastEMA = c*self.EMAFastAlpha + self.fastEMA*(1-self.EMAFastAlpha) self.slowEMA = c*self.EMASlowAlpha + self.slowEMA*(1-self.EMASlowAlpha) else: self.fastEMA = c self.slowEMA = c self.listfastEMA.append(self.fastEMA) self.listslowEMA.append(self.slowEMA) self.plotText() #显示开仓位置 # ---------------------------------------------------------------------- #画合并后的K线Bar def onBarAfterFenXing(self, n, t, o, c, l, h): self.listBar.append((n, t, o, c, l, h)) def plotKlineAfterFenXing(self): # 画K线 self.pw2.removeItem(self.candle) self.candle = self.CandlestickItem(self.listBar) self.pw2.addItem(self.candle) #---------------------------------------------------------------------- def __connectMongo(self): """连接MongoDB数据库""" try: self.__mongoConnection = pymongo.MongoClient("localhost", 27017) self.__mongoConnected = True self.__mongoMinDB = self.__mongoConnection['VnTrader_1Min_Db'] except ConnectionFailure: pass ######################################################################## class TickWidget(QtGui.QWidget): 
"""用于显示价格走势图""" signal = QtCore.pyqtSignal(type(Event())) # tick图的相关参数、变量 listlastPrice = np.empty(1000) fastMA = 0 midMA = 0 slowMA = 0 listfastMA = np.empty(1000) listmidMA = np.empty(1000) listslowMA = np.empty(1000) tickFastAlpha = 0.0333 # 快速均线的参数,30 tickMidAlpha = 0.0167 # 中速均线的参数,60 tickSlowAlpha = 0.0083 # 慢速均线的参数,120 ptr = 0 ticktime = None # tick数据时间 class CandlestickItem(pg.GraphicsObject): def __init__(self, data): pg.GraphicsObject.__init__(self) self.data = data ## data must have fields: time, open, close, min, max self.generatePicture() def generatePicture(self): ## pre-computing a QPicture object allows paint() to run much more quickly, ## rather than re-drawing the shapes every time. self.picture = QtGui.QPicture() p = QtGui.QPainter(self.picture) p.setPen(pg.mkPen(color='w', width=0.4)) # 0.4 means w*2 a = pg.AxisItem('bottom', pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True) a.setFixedWidth(1) a.setWidth(1) a.setLabel(show=True) a.setGrid(grid=True) labelStyle = {'color': '#FFF', 'font-size': '14pt'} a.setLabel('label text', units='V', **labelStyle) # w = (self.data[1][0] - self.data[0][0]) / 3. w = 0.2 for (t, open, close, min, max) in self.data: p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max)) if open > close: p.setBrush(pg.mkBrush('g')) else: p.setBrush(pg.mkBrush('r')) p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open)) pg.setConfigOption('leftButtonPan', False) p.end() def paint(self, p, *args): p.drawPicture(0, 0, self.picture) def boundingRect(self): ## boundingRect _must_ indicate the entire area that will be drawn on ## or else we will get artifacts and possibly crashing. ## (in this case, QPicture does all the work of computing the bouning rect for us) return QtCore.QRectF(self.picture.boundingRect()) #---------------------------------------------------------------------- def __init__(self, eventEngine, chanlunEngine, parent=None): """Constructor""" super(TickWidget, self).__init__(parent) self.__eventEngine = eventEngine self.__chanlunEngine = chanlunEngine # MongoDB数据库相关 self.__mongoConnected = False self.__mongoConnection = None self.__mongoTickDB = None # 调用函数 self.initUi() self.registerEvent() #---------------------------------------------------------------------- def initUi(self): """初始化界面""" self.setWindowTitle(u'Tick') self.vbl_1 = QtGui.QHBoxLayout() self.initplotTick() # plotTick初始化 self.setLayout(self.vbl_1) #---------------------------------------------------------------------- def initplotTick(self): """""" self.pw1 = pg.PlotWidget(name='Plot1') self.vbl_1.addWidget(self.pw1) self.pw1.setMinimumWidth(1500) self.pw1.setMaximumWidth(1800) self.pw1.setRange(xRange=[-360, 0]) self.pw1.setLimits(xMax=5) self.pw1.setDownsampling(mode='peak') self.pw1.setClipToView(True) self.curve1 = self.pw1.plot() self.curve2 = self.pw1.plot() self.curve3 = self.pw1.plot() self.curve4 = self.pw1.plot() # #---------------------------------------------------------------------- # def initHistoricalData(self,startDate=None): # """初始历史数据""" # print "download histrical data" # self.initCompleted = True # 读取历史数据完成 # td = timedelta(days=1) # 读取3天的历史TICK数据 # # if startDate: # cx = self.loadTick(self.symbol, startDate-td) # else: # today = datetime.today().replace(hour=0, minute=0, second=0, microsecond=0) # cx = self.loadTick(self.symbol, today-td) # # print cx.count() # # if cx: # for data in cx: # tick = Tick(data['symbol']) # # tick.openPrice = data['lastPrice'] # tick.highPrice = data['upperLimit'] # tick.lowPrice = data['lowerLimit'] # 
tick.lastPrice = data['lastPrice'] # # tick.volume = data['volume'] # tick.openInterest = data['openInterest'] # # tick.upperLimit = data['upperLimit'] # tick.lowerLimit = data['lowerLimit'] # # tick.time = data['time'] # # tick.ms = data['UpdateMillisec'] # # tick.bidPrice1 = data['bidPrice1'] # tick.bidPrice2 = data['bidPrice2'] # tick.bidPrice3 = data['bidPrice3'] # tick.bidPrice4 = data['bidPrice4'] # tick.bidPrice5 = data['bidPrice5'] # # tick.askPrice1 = data['askPrice1'] # tick.askPrice2 = data['askPrice2'] # tick.askPrice3 = data['askPrice3'] # tick.askPrice4 = data['askPrice4'] # tick.askPrice5 = data['askPrice5'] # # tick.bidVolume1 = data['bidVolume1'] # tick.bidVolume2 = data['bidVolume2'] # tick.bidVolume3 = data['bidVolume3'] # tick.bidVolume4 = data['bidVolume4'] # tick.bidVolume5 = data['bidVolume5'] # # tick.askVolume1 = data['askVolume1'] # tick.askVolume2 = data['askVolume2'] # tick.askVolume3 = data['askVolume3'] # tick.askVolume4 = data['askVolume4'] # tick.askVolume5 = data['askVolume5'] # # self.onTick(tick) # # print('load historic data completed') #---------------------------------------------------------------------- def plotTick(self): """画tick图""" self.curve1.setData(self.listlastPrice[:self.ptr]) self.curve2.setData(self.listfastMA[:self.ptr], pen=(255, 0, 0), name="Red curve") self.curve3.setData(self.listmidMA[:self.ptr], pen=(0, 255, 0), name="Green curve") self.curve4.setData(self.listslowMA[:self.ptr], pen=(0, 0, 255), name="Blue curve") self.curve1.setPos(-self.ptr, 0) self.curve2.setPos(-self.ptr, 0) self.curve3.setPos(-self.ptr, 0) self.curve4.setPos(-self.ptr, 0) #---------------------------------------------------------------------- def updateMarketData(self, event): """更新行情""" data = event.dict_['data'] print "update", data['InstrumentID'] symbol = data['InstrumentID'] tick = Tick(symbol) tick.openPrice = data['OpenPrice'] tick.highPrice = data['HighestPrice'] tick.lowPrice = data['LowestPrice'] tick.lastPrice = data['LastPrice'] tick.volume = data['Volume'] tick.openInterest = data['OpenInterest'] tick.upperLimit = data['UpperLimitPrice'] tick.lowerLimit = data['LowerLimitPrice'] tick.time = data['UpdateTime'] tick.ms = data['UpdateMillisec'] tick.bidPrice1 = data['BidPrice1'] tick.bidPrice2 = data['BidPrice2'] tick.bidPrice3 = data['BidPrice3'] tick.bidPrice4 = data['BidPrice4'] tick.bidPrice5 = data['BidPrice5'] tick.askPrice1 = data['AskPrice1'] tick.askPrice2 = data['AskPrice2'] tick.askPrice3 = data['AskPrice3'] tick.askPrice4 = data['AskPrice4'] tick.askPrice5 = data['AskPrice5'] tick.bidVolume1 = data['BidVolume1'] tick.bidVolume2 = data['BidVolume2'] tick.bidVolume3 = data['BidVolume3'] tick.bidVolume4 = data['BidVolume4'] tick.bidVolume5 = data['BidVolume5'] tick.askVolume1 = data['AskVolume1'] tick.askVolume2 = data['AskVolume2'] tick.askVolume3 = data['AskVolume3'] tick.askVolume4 = data['AskVolume4'] tick.askVolume5 = data['AskVolume5'] self.onTick(tick) # tick数据更新 self.__recordTick(tick) #记录Tick数据 #---------------------------------------------------------------------- def onTick(self, tick): """tick数据更新""" from datetime import time # 首先生成datetime.time格式的时间(便于比较),从字符串时间转化为time格式的时间 hh, mm, ss = tick.time.split(':') self.ticktime = time(int(hh), int(mm), int(ss), microsecond=tick.ms) # 计算tick图的相关参数 if self.ptr == 0: self.fastMA = tick.lastPrice self.midMA = tick.lastPrice self.slowMA = tick.lastPrice else: self.fastMA = (1-self.tickFastAlpha) * self.fastMA + self.tickFastAlpha * tick.lastPrice self.midMA = (1-self.tickMidAlpha) * 
self.midMA + self.tickMidAlpha * tick.lastPrice self.slowMA = (1-self.tickSlowAlpha) * self.slowMA + self.tickSlowAlpha * tick.lastPrice self.listlastPrice[self.ptr] = int(tick.lastPrice) self.listfastMA[self.ptr] = int(self.fastMA) self.listmidMA[self.ptr] = int(self.midMA) self.listslowMA[self.ptr] = int(self.slowMA) self.ptr += 1 print(self.ptr) if self.ptr >= self.listlastPrice.shape[0]: tmp = self.listlastPrice self.listlastPrice = np.empty(self.listlastPrice.shape[0] * 2) self.listlastPrice[:tmp.shape[0]] = tmp tmp = self.listfastMA self.listfastMA = np.empty(self.listfastMA.shape[0] * 2) self.listfastMA[:tmp.shape[0]] = tmp tmp = self.listmidMA self.listmidMA = np.empty(self.listmidMA.shape[0] * 2) self.listmidMA[:tmp.shape[0]] = tmp tmp = self.listslowMA self.listslowMA = np.empty(self.listslowMA.shape[0] * 2) self.listslowMA[:tmp.shape[0]] = tmp # 调用画图函数 self.plotTick() # tick图 #---------------------------------------------------------------------- def __connectMongo(self): """连接MongoDB数据库""" try: self.__mongoConnection = pymongo.MongoClient("localhost", 27017) self.__mongoConnected = True self.__mongoTickDB = self.__mongoConnection['VnTrader_Tick_Db'] except ConnectionFailure: pass #---------------------------------------------------------------------- def __recordTick(self, data): """将Tick数据插入到MongoDB中""" if self.__mongoConnected: symbol = data['InstrumentID'] data['date'] = datetime.now().strftime('%Y%m%d') self.__mongoTickDB[symbol].insert(data) # #---------------------------------------------------------------------- # def loadTick(self, symbol, startDate, endDate=None): # """从MongoDB中读取Tick数据""" # cx = self.__mongoTickDB[symbol].find() # print cx.count() # return cx # # if self.__mongoConnected: # # collection = self.__mongoTickDB[symbol] # # # # # 如果输入了读取TICK的最后日期 # # if endDate: # # cx = collection.find({'date': {'$gte': startDate, '$lte': endDate}}) # # else: # # cx = collection.find({'date': {'$gte': startDate}}) # # return cx # # else: # # return None #---------------------------------------------------------------------- def registerEvent(self): """注册事件监听""" print "connect" self.signal.connect(self.updateMarketData) self.__eventEngine.register(EVENT_MARKETDATA, self.signal.emit) class Tick: """Tick数据对象""" #---------------------------------------------------------------------- def __init__(self, symbol): """Constructor""" self.symbol = symbol # 合约代码 self.openPrice = 0 # OHLC self.highPrice = 0 self.lowPrice = 0 self.lastPrice = 0 self.volume = 0 # 成交量 self.openInterest = 0 # 持仓量 self.upperLimit = 0 # 涨停价 self.lowerLimit = 0 # 跌停价 self.time = '' # 更新时间和毫秒 self.ms = 0 self.bidPrice1 = 0 # 深度行情 self.bidPrice2 = 0 self.bidPrice3 = 0 self.bidPrice4 = 0 self.bidPrice5 = 0 self.askPrice1 = 0 self.askPrice2 = 0 self.askPrice3 = 0 self.askPrice4 = 0 self.askPrice5 = 0 self.bidVolume1 = 0 self.bidVolume2 = 0 self.bidVolume3 = 0 self.bidVolume4 = 0 self.bidVolume5 = 0 self.askVolume1 = 0 self.askVolume2 = 0 self.askVolume3 = 0 self.askVolume4 = 0 self.askVolume5 = 0
mit
kaiserroll14/301finalproject
main/pandas/sparse/panel.py
9
18717
""" Data structures for sparse float data. Life is made simpler by dealing only with float64 data """ # pylint: disable=E1101,E1103,W0231 import warnings from pandas.compat import range, lrange, zip from pandas import compat import numpy as np from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.frame import DataFrame from pandas.core.panel import Panel from pandas.sparse.frame import SparseDataFrame from pandas.util.decorators import deprecate import pandas.core.common as com import pandas.core.ops as ops class SparsePanelAxis(object): def __init__(self, cache_field, frame_attr): self.cache_field = cache_field self.frame_attr = frame_attr def __get__(self, obj, type=None): return getattr(obj, self.cache_field, None) def __set__(self, obj, value): value = _ensure_index(value) if isinstance(value, MultiIndex): raise NotImplementedError("value cannot be a MultiIndex") for v in compat.itervalues(obj._frames): setattr(v, self.frame_attr, value) setattr(obj, self.cache_field, value) class SparsePanel(Panel): """ Sparse version of Panel Parameters ---------- frames : dict of DataFrame objects items : array-like major_axis : array-like minor_axis : array-like default_kind : {'block', 'integer'}, default 'block' Default sparse kind for converting Series to SparseSeries. Will not override SparseSeries passed into constructor default_fill_value : float Default fill_value for converting Series to SparseSeries. Will not override SparseSeries passed in Notes ----- """ ndim = 3 _typ = 'panel' _subtyp = 'sparse_panel' def __init__(self, frames=None, items=None, major_axis=None, minor_axis=None, default_fill_value=np.nan, default_kind='block', copy=False): # deprecation #11157 warnings.warn("SparsePanel is deprecated and will be removed in a future version", FutureWarning, stacklevel=2) if frames is None: frames = {} if isinstance(frames, np.ndarray): new_frames = {} for item, vals in zip(items, frames): new_frames[item] = \ SparseDataFrame(vals, index=major_axis, columns=minor_axis, default_fill_value=default_fill_value, default_kind=default_kind) frames = new_frames if not isinstance(frames, dict): raise TypeError('input must be a dict, a %r was passed' % type(frames).__name__) self.default_fill_value = fill_value = default_fill_value self.default_kind = kind = default_kind # pre-filter, if necessary if items is None: items = Index(sorted(frames.keys())) items = _ensure_index(items) (clean_frames, major_axis, minor_axis) = _convert_frames(frames, major_axis, minor_axis, kind=kind, fill_value=fill_value) self._frames = clean_frames # do we want to fill missing ones? 
for item in items: if item not in clean_frames: raise ValueError('column %r not found in data' % item) self._items = items self.major_axis = major_axis self.minor_axis = minor_axis def _consolidate_inplace(self): # pragma: no cover # do nothing when DataFrame calls this method pass def __array_wrap__(self, result): return SparsePanel(result, items=self.items, major_axis=self.major_axis, minor_axis=self.minor_axis, default_kind=self.default_kind, default_fill_value=self.default_fill_value) @classmethod def from_dict(cls, data): """ Analogous to Panel.from_dict """ return SparsePanel(data) def to_dense(self): """ Convert SparsePanel to (dense) Panel Returns ------- dense : Panel """ return Panel(self.values, self.items, self.major_axis, self.minor_axis) def as_matrix(self): return self.values @property def values(self): # return dense values return np.array([self._frames[item].values for item in self.items]) # need a special property for items to make the field assignable _items = None def _get_items(self): return self._items def _set_items(self, new_items): new_items = _ensure_index(new_items) if isinstance(new_items, MultiIndex): raise NotImplementedError("itemps cannot be a MultiIndex") # need to create new frames dict old_frame_dict = self._frames old_items = self._items self._frames = dict((new_k, old_frame_dict[old_k]) for new_k, old_k in zip(new_items, old_items)) self._items = new_items items = property(fget=_get_items, fset=_set_items) # DataFrame's index major_axis = SparsePanelAxis('_major_axis', 'index') # DataFrame's columns / "items" minor_axis = SparsePanelAxis('_minor_axis', 'columns') def _ixs(self, i, axis=0): """ for compat as we don't support Block Manager here i : int, slice, or sequence of integers axis : int """ key = self._get_axis(axis)[i] # xs cannot handle a non-scalar key, so just reindex here if com.is_list_like(key): return self.reindex(**{self._get_axis_name(axis): key}) return self.xs(key, axis=axis) def _slice(self, slobj, axis=0, kind=None): """ for compat as we don't support Block Manager here """ axis = self._get_axis_name(axis) index = self._get_axis(axis) return self.reindex(**{axis: index[slobj]}) def _get_item_cache(self, key): return self._frames[key] def __setitem__(self, key, value): if isinstance(value, DataFrame): value = value.reindex(index=self.major_axis, columns=self.minor_axis) if not isinstance(value, SparseDataFrame): value = value.to_sparse(fill_value=self.default_fill_value, kind=self.default_kind) else: raise ValueError('only DataFrame objects can be set currently') self._frames[key] = value if key not in self.items: self._items = Index(list(self.items) + [key]) def set_value(self, item, major, minor, value): """ Quickly set single value at (item, major, minor) location Parameters ---------- item : item label (panel item) major : major axis label (panel item row) minor : minor axis label (panel item column) value : scalar Notes ----- This method *always* returns a new object. 
It is not particularly efficient but is provided for API compatibility with Panel Returns ------- panel : SparsePanel """ dense = self.to_dense().set_value(item, major, minor, value) return dense.to_sparse(kind=self.default_kind, fill_value=self.default_fill_value) def __delitem__(self, key): loc = self.items.get_loc(key) indices = lrange(loc) + lrange(loc + 1, len(self.items)) del self._frames[key] self._items = self._items.take(indices) def __getstate__(self): # pickling return (self._frames, com._pickle_array(self.items), com._pickle_array(self.major_axis), com._pickle_array(self.minor_axis), self.default_fill_value, self.default_kind) def __setstate__(self, state): frames, items, major, minor, fv, kind = state self.default_fill_value = fv self.default_kind = kind self._items = _ensure_index(com._unpickle_array(items)) self._major_axis = _ensure_index(com._unpickle_array(major)) self._minor_axis = _ensure_index(com._unpickle_array(minor)) self._frames = frames def copy(self, deep=True): """ Make a copy of the sparse panel Returns ------- copy : SparsePanel """ d = self._construct_axes_dict() if deep: new_data = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(self._frames)) d = dict((k, v.copy(deep=True)) for k, v in compat.iteritems(d)) else: new_data = self._frames.copy() d['default_fill_value']=self.default_fill_value d['default_kind']=self.default_kind return SparsePanel(new_data, **d) def to_frame(self, filter_observations=True): """ Convert SparsePanel to (dense) DataFrame Returns ------- frame : DataFrame """ if not filter_observations: raise TypeError('filter_observations=False not supported for ' 'SparsePanel.to_long') I, N, K = self.shape counts = np.zeros(N * K, dtype=int) d_values = {} d_indexer = {} for item in self.items: frame = self[item] values, major, minor = _stack_sparse_info(frame) # values are stacked column-major indexer = minor * N + major counts.put(indexer, counts.take(indexer) + 1) # cuteness d_values[item] = values d_indexer[item] = indexer # have full set of observations for each item mask = counts == I # for each item, take mask values at index locations for those sparse # values, and use that to select values values = np.column_stack([d_values[item][mask.take(d_indexer[item])] for item in self.items]) inds, = mask.nonzero() # still column major major_labels = inds % N minor_labels = inds // N index = MultiIndex(levels=[self.major_axis, self.minor_axis], labels=[major_labels, minor_labels], verify_integrity=False) df = DataFrame(values, index=index, columns=self.items) return df.sortlevel(level=0) to_long = deprecate('to_long', to_frame) toLong = deprecate('toLong', to_frame) def reindex(self, major=None, items=None, minor=None, major_axis=None, minor_axis=None, copy=False): """ Conform / reshape panel axis labels to new input labels Parameters ---------- major : array-like, default None items : array-like, default None minor : array-like, default None copy : boolean, default False Copy underlying SparseDataFrame objects Returns ------- reindexed : SparsePanel """ major = com._mut_exclusive(major=major, major_axis=major_axis) minor = com._mut_exclusive(minor=minor, minor_axis=minor_axis) if com._all_none(items, major, minor): raise ValueError('Must specify at least one axis') major = self.major_axis if major is None else major minor = self.minor_axis if minor is None else minor if items is not None: new_frames = {} for item in items: if item in self._frames: new_frames[item] = self._frames[item] else: raise NotImplementedError('Reindexing with new 
items not yet ' 'supported') else: new_frames = self._frames if copy: new_frames = dict((k, v.copy()) for k, v in compat.iteritems(new_frames)) return SparsePanel(new_frames, items=items, major_axis=major, minor_axis=minor, default_fill_value=self.default_fill_value, default_kind=self.default_kind) def _combine(self, other, func, axis=0): if isinstance(other, DataFrame): return self._combineFrame(other, func, axis=axis) elif isinstance(other, Panel): return self._combinePanel(other, func) elif np.isscalar(other): new_frames = dict((k, func(v, other)) for k, v in compat.iteritems(self)) return self._new_like(new_frames) def _combineFrame(self, other, func, axis=0): index, columns = self._get_plane_axes(axis) axis = self._get_axis_number(axis) other = other.reindex(index=index, columns=columns) if axis == 0: new_values = func(self.values, other.values) elif axis == 1: new_values = func(self.values.swapaxes(0, 1), other.values.T) new_values = new_values.swapaxes(0, 1) elif axis == 2: new_values = func(self.values.swapaxes(0, 2), other.values) new_values = new_values.swapaxes(0, 2) # TODO: make faster! new_frames = {} for item, item_slice in zip(self.items, new_values): old_frame = self[item] ofv = old_frame.default_fill_value ok = old_frame.default_kind new_frames[item] = SparseDataFrame(item_slice, index=self.major_axis, columns=self.minor_axis, default_fill_value=ofv, default_kind=ok) return self._new_like(new_frames) def _new_like(self, new_frames): return SparsePanel(new_frames, self.items, self.major_axis, self.minor_axis, default_fill_value=self.default_fill_value, default_kind=self.default_kind) def _combinePanel(self, other, func): items = self.items.union(other.items) major = self.major_axis.union(other.major_axis) minor = self.minor_axis.union(other.minor_axis) # could check that everything's the same size, but forget it this = self.reindex(items=items, major=major, minor=minor) other = other.reindex(items=items, major=major, minor=minor) new_frames = {} for item in items: new_frames[item] = func(this[item], other[item]) if not isinstance(other, SparsePanel): new_default_fill = self.default_fill_value else: # maybe unnecessary new_default_fill = func(self.default_fill_value, other.default_fill_value) return SparsePanel(new_frames, items, major, minor, default_fill_value=new_default_fill, default_kind=self.default_kind) def major_xs(self, key): """ Return slice of panel along major axis Parameters ---------- key : object Major axis label Returns ------- y : DataFrame index -> minor axis, columns -> items """ slices = dict((k, v.xs(key)) for k, v in compat.iteritems(self)) return DataFrame(slices, index=self.minor_axis, columns=self.items) def minor_xs(self, key): """ Return slice of panel along minor axis Parameters ---------- key : object Minor axis label Returns ------- y : SparseDataFrame index -> major axis, columns -> items """ slices = dict((k, v[key]) for k, v in compat.iteritems(self)) return SparseDataFrame(slices, index=self.major_axis, columns=self.items, default_fill_value=self.default_fill_value, default_kind=self.default_kind) # TODO: allow SparsePanel to work with flex arithmetic. 
    # pow and mod only work for scalars for now
    def pow(self, val, *args, **kwargs):
        """wrapper around `__pow__` (only works for scalar values)"""
        return self.__pow__(val)

    def mod(self, val, *args, **kwargs):
        """wrapper around `__mod__` (only works for scalar values"""
        return self.__mod__(val)

# Sparse objects opt out of numexpr
SparsePanel._add_aggregate_operations(use_numexpr=False)
ops.add_special_arithmetic_methods(SparsePanel, use_numexpr=False,
                                   **ops.panel_special_funcs)

SparseWidePanel = SparsePanel


def _convert_frames(frames, index, columns, fill_value=np.nan, kind='block'):
    from pandas.core.panel import _get_combined_index
    output = {}
    for item, df in compat.iteritems(frames):
        if not isinstance(df, SparseDataFrame):
            df = SparseDataFrame(df, default_kind=kind,
                                 default_fill_value=fill_value)
        output[item] = df

    if index is None:
        all_indexes = [df.index for df in output.values()]
        index = _get_combined_index(all_indexes)
    if columns is None:
        all_columns = [df.columns for df in output.values()]
        columns = _get_combined_index(all_columns)

    index = _ensure_index(index)
    columns = _ensure_index(columns)

    for item, df in compat.iteritems(output):
        if not (df.index.equals(index) and df.columns.equals(columns)):
            output[item] = df.reindex(index=index, columns=columns)

    return output, index, columns


def _stack_sparse_info(frame):
    lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]

    # this is pretty fast
    minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)

    inds_to_concat = []
    vals_to_concat = []
    for col in frame.columns:
        series = frame[col]

        if not np.isnan(series.fill_value):
            raise TypeError('This routine assumes NaN fill value')

        int_index = series.sp_index.to_int_index()
        inds_to_concat.append(int_index.indices)
        vals_to_concat.append(series.sp_values)

    major_labels = np.concatenate(inds_to_concat)
    sparse_values = np.concatenate(vals_to_concat)

    return sparse_values, major_labels, minor_labels
gpl-3.0
pompiduskus/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py
254
2253
"""Build a language detector model The goal of this exercise is to train a linear classifier on text features that represent sequences of up to 3 consecutive characters so as to be recognize natural languages by using the frequencies of short character sequences as 'fingerprints'. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import Perceptron from sklearn.pipeline import Pipeline from sklearn.datasets import load_files from sklearn.cross_validation import train_test_split from sklearn import metrics # The training data folder must be passed as first argument languages_data_folder = sys.argv[1] dataset = load_files(languages_data_folder) # Split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.5) # TASK: Build a an vectorizer that splits strings into sequence of 1 to 3 # characters instead of word tokens vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char', use_idf=False) # TASK: Build a vectorizer / classifier pipeline using the previous analyzer # the pipeline instance should stored in a variable named clf clf = Pipeline([ ('vec', vectorizer), ('clf', Perceptron()), ]) # TASK: Fit the pipeline on the training set clf.fit(docs_train, y_train) # TASK: Predict the outcome on the testing set in a variable named y_predicted y_predicted = clf.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) #import pylab as pl #pl.matshow(cm, cmap=pl.cm.jet) #pl.show() # Predict the result on some short new sentences: sentences = [ u'This is a language detection test.', u'Ceci est un test de d\xe9tection de la langue.', u'Dies ist ein Test, um die Sprache zu erkennen.', ] predicted = clf.predict(sentences) for s, p in zip(sentences, predicted): print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
bsd-3-clause
rabernat/xrft
setup.py
1
1391
import os
import versioneer
from setuptools import setup, find_packages

PACKAGES = find_packages()

DISTNAME = 'xrft'
LICENSE = 'MIT'
AUTHOR = 'xrft Developers'
AUTHOR_EMAIL = 'takaya@ldeo.columbia.edu'
URL = 'https://github.com/xgcm/xrft'
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'License :: OSI Approved :: Apache Software License',
    'Operating System :: OS Independent',
    'Intended Audience :: Science/Research',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Scientific/Engineering',
]

INSTALL_REQUIRES = ['xarray', 'dask', 'numpy', 'pandas', 'scipy']
EXTRAS_REQUIRE = ['cftime']
SETUP_REQUIRES = ['pytest-runner']
TESTS_REQUIRE = ['pytest >= 2.8', 'coverage']

DESCRIPTION = "Discrete Fourier Transform with xarray"


def readme():
    with open('README.rst') as f:
        return f.read()


setup(name=DISTNAME,
      version=versioneer.get_version(),
      cmdclass=versioneer.get_cmdclass(),
      license=LICENSE,
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      classifiers=CLASSIFIERS,
      description=DESCRIPTION,
      long_description=readme(),
      install_requires=INSTALL_REQUIRES,
      setup_requires=SETUP_REQUIRES,
      tests_require=TESTS_REQUIRE,
      url=URL,
      packages=find_packages())
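# --- Editor's addition: hedged usage note, not part of xrft ---
# The metadata above is consumed by setuptools/pip, e.g.:
#
#   pip install -e .          # editable install driven by this setup.py
#   python setup.py sdist     # build a source distribution
#
# Note that EXTRAS_REQUIRE is defined but never passed to setup(); exposing
# cftime as an optional extra would presumably need something like
# ``extras_require={'full': ['cftime']}`` (an assumption about the intent).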
mit
numenta/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py
69
27260
''' Colorbar toolkit with two classes and a function: :class:`ColorbarBase` the base class with full colorbar drawing functionality. It can be used as-is to make a colorbar for a given colormap; a mappable object (e.g., image) is not needed. :class:`Colorbar` the derived class for use with images or contour plots. :func:`make_axes` a function for resizing an axes and adding a second axes suitable for a colorbar The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes` and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`. ''' import numpy as np import matplotlib as mpl import matplotlib.colors as colors import matplotlib.cm as cm import matplotlib.ticker as ticker import matplotlib.cbook as cbook import matplotlib.lines as lines import matplotlib.patches as patches import matplotlib.collections as collections import matplotlib.contour as contour make_axes_kw_doc = ''' ========== ==================================================== Property Description ========== ==================================================== *fraction* 0.15; fraction of original axes to use for colorbar *pad* 0.05 if vertical, 0.15 if horizontal; fraction of original axes between colorbar and new image axes *shrink* 1.0; fraction by which to shrink the colorbar *aspect* 20; ratio of long to short dimensions ========== ==================================================== ''' colormap_kw_doc = ''' =========== ==================================================== Property Description =========== ==================================================== *extend* [ 'neither' | 'both' | 'min' | 'max' ] If not 'neither', make pointed end(s) for out-of- range values. These are set for a given colormap using the colormap set_under and set_over methods. *spacing* [ 'uniform' | 'proportional' ] Uniform spacing gives each discrete color the same space; proportional makes the space proportional to the data interval. *ticks* [ None | list of ticks | Locator object ] If None, ticks are determined automatically from the input. *format* [ None | format string | Formatter object ] If None, the :class:`~matplotlib.ticker.ScalarFormatter` is used. If a format string is given, e.g. '%.3f', that is used. An alternative :class:`~matplotlib.ticker.Formatter` object may be given instead. *drawedges* [ False | True ] If true, draw lines at color boundaries. =========== ==================================================== The following will probably be useful only in the context of indexed colors (that is, when the mappable has norm=NoNorm()), or other unusual circumstances. ============ =================================================== Property Description ============ =================================================== *boundaries* None or a sequence *values* None or a sequence which must be of length 1 less than the sequence of *boundaries*. For each region delimited by adjacent entries in *boundaries*, the color mapped to the corresponding value in values will be used. ============ =================================================== ''' colorbar_doc = ''' Add a colorbar to a plot. 
Function signatures for the :mod:`~matplotlib.pyplot` interface; all but the first are also method signatures for the :meth:`~matplotlib.figure.Figure.colorbar` method:: colorbar(**kwargs) colorbar(mappable, **kwargs) colorbar(mappable, cax=cax, **kwargs) colorbar(mappable, ax=ax, **kwargs) arguments: *mappable* the :class:`~matplotlib.image.Image`, :class:`~matplotlib.contour.ContourSet`, etc. to which the colorbar applies; this argument is mandatory for the :meth:`~matplotlib.figure.Figure.colorbar` method but optional for the :func:`~matplotlib.pyplot.colorbar` function, which sets the default to the current image. keyword arguments: *cax* None | axes object into which the colorbar will be drawn *ax* None | parent axes object from which space for a new colorbar axes will be stolen Additional keyword arguments are of two kinds: axes properties: %s colorbar properties: %s If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend* kwarg is included automatically. Note that the *shrink* kwarg provides a simple way to keep a vertical colorbar, for example, from being taller than the axes of the mappable to which the colorbar is attached; but it is a manual method requiring some trial and error. If the colorbar is too tall (or a horizontal colorbar is too wide) use a smaller value of *shrink*. For more precise control, you can manually specify the positions of the axes objects in which the mappable and the colorbar are drawn. In this case, do not use any of the axes properties kwargs. returns: :class:`~matplotlib.colorbar.Colorbar` instance; see also its base class, :class:`~matplotlib.colorbar.ColorbarBase`. Call the :meth:`~matplotlib.colorbar.ColorbarBase.set_label` method to label the colorbar. ''' % (make_axes_kw_doc, colormap_kw_doc) class ColorbarBase(cm.ScalarMappable): ''' Draw a colorbar in an existing axes. This is a base class for the :class:`Colorbar` class, which is the basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab function. It is also useful by itself for showing a colormap. If the *cmap* kwarg is given but *boundaries* and *values* are left as None, then the colormap will be displayed on a 0-1 scale. To show the under- and over-value colors, specify the *norm* as:: colors.Normalize(clip=False) To show the colors versus index instead of on the 0-1 scale, use:: norm=colors.NoNorm. Useful attributes: :attr:`ax` the Axes instance in which the colorbar is drawn :attr:`lines` a LineCollection if lines were drawn, otherwise None :attr:`dividers` a LineCollection if *drawedges* is True, otherwise None Useful public methods are :meth:`set_label` and :meth:`add_lines`. 
''' _slice_dict = {'neither': slice(0,1000000), 'both': slice(1,-1), 'min': slice(1,1000000), 'max': slice(0,-1)} def __init__(self, ax, cmap=None, norm=None, alpha=1.0, values=None, boundaries=None, orientation='vertical', extend='neither', spacing='uniform', # uniform or proportional ticks=None, format=None, drawedges=False, filled=True, ): self.ax = ax if cmap is None: cmap = cm.get_cmap() if norm is None: norm = colors.Normalize() self.alpha = alpha cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm) self.values = values self.boundaries = boundaries self.extend = extend self._inside = self._slice_dict[extend] self.spacing = spacing self.orientation = orientation self.drawedges = drawedges self.filled = filled self.solids = None self.lines = None self.dividers = None self.set_label('') if cbook.iterable(ticks): self.locator = ticker.FixedLocator(ticks, nbins=len(ticks)) else: self.locator = ticks # Handle default in _ticker() if format is None: if isinstance(self.norm, colors.LogNorm): self.formatter = ticker.LogFormatter() else: self.formatter = ticker.ScalarFormatter() elif cbook.is_string_like(format): self.formatter = ticker.FormatStrFormatter(format) else: self.formatter = format # Assume it is a Formatter # The rest is in a method so we can recalculate when clim changes. self.draw_all() def draw_all(self): ''' Calculate any free parameters based on the current cmap and norm, and do all the drawing. ''' self._process_values() self._find_range() X, Y = self._mesh() C = self._values[:,np.newaxis] self._config_axes(X, Y) if self.filled: self._add_solids(X, Y, C) self._set_label() def _config_axes(self, X, Y): ''' Make an axes patch and outline. ''' ax = self.ax ax.set_frame_on(False) ax.set_navigate(False) xy = self._outline(X, Y) ax.update_datalim(xy) ax.set_xlim(*ax.dataLim.intervalx) ax.set_ylim(*ax.dataLim.intervaly) self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'], linewidth=mpl.rcParams['axes.linewidth']) ax.add_artist(self.outline) self.outline.set_clip_box(None) self.outline.set_clip_path(None) c = mpl.rcParams['axes.facecolor'] self.patch = patches.Polygon(xy, edgecolor=c, facecolor=c, linewidth=0.01, zorder=-1) ax.add_artist(self.patch) ticks, ticklabels, offset_string = self._ticker() if self.orientation == 'vertical': ax.set_xticks([]) ax.yaxis.set_label_position('right') ax.yaxis.set_ticks_position('right') ax.set_yticks(ticks) ax.set_yticklabels(ticklabels) ax.yaxis.get_major_formatter().set_offset_string(offset_string) else: ax.set_yticks([]) ax.xaxis.set_label_position('bottom') ax.set_xticks(ticks) ax.set_xticklabels(ticklabels) ax.xaxis.get_major_formatter().set_offset_string(offset_string) def _set_label(self): if self.orientation == 'vertical': self.ax.set_ylabel(self._label, **self._labelkw) else: self.ax.set_xlabel(self._label, **self._labelkw) def set_label(self, label, **kw): ''' Label the long axis of the colorbar ''' self._label = label self._labelkw = kw self._set_label() def _outline(self, X, Y): ''' Return *x*, *y* arrays of colorbar bounding polygon, taking orientation into account. ''' N = X.shape[0] ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0] x = np.take(np.ravel(np.transpose(X)), ii) y = np.take(np.ravel(np.transpose(Y)), ii) x = x.reshape((len(x), 1)) y = y.reshape((len(y), 1)) if self.orientation == 'horizontal': return np.hstack((y, x)) return np.hstack((x, y)) def _edges(self, X, Y): ''' Return the separator line segments; helper for _add_solids. 
''' N = X.shape[0] # Using the non-array form of these line segments is much # simpler than making them into arrays. if self.orientation == 'vertical': return [zip(X[i], Y[i]) for i in range(1, N-1)] else: return [zip(Y[i], X[i]) for i in range(1, N-1)] def _add_solids(self, X, Y, C): ''' Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`; optionally add separators. ''' ## Change to pcolorfast after fixing bugs in some backends... if self.orientation == 'vertical': args = (X, Y, C) else: args = (np.transpose(Y), np.transpose(X), np.transpose(C)) kw = {'cmap':self.cmap, 'norm':self.norm, 'shading':'flat', 'alpha':self.alpha} # Save, set, and restore hold state to keep pcolor from # clearing the axes. Ordinarily this will not be needed, # since the axes object should already have hold set. _hold = self.ax.ishold() self.ax.hold(True) col = self.ax.pcolor(*args, **kw) self.ax.hold(_hold) #self.add_observer(col) # We should observe, not be observed... self.solids = col if self.drawedges: self.dividers = collections.LineCollection(self._edges(X,Y), colors=(mpl.rcParams['axes.edgecolor'],), linewidths=(0.5*mpl.rcParams['axes.linewidth'],) ) self.ax.add_collection(self.dividers) def add_lines(self, levels, colors, linewidths): ''' Draw lines on the colorbar. ''' N = len(levels) dummy, y = self._locate(levels) if len(y) <> N: raise ValueError("levels are outside colorbar range") x = np.array([0.0, 1.0]) X, Y = np.meshgrid(x,y) if self.orientation == 'vertical': xy = [zip(X[i], Y[i]) for i in range(N)] else: xy = [zip(Y[i], X[i]) for i in range(N)] col = collections.LineCollection(xy, linewidths=linewidths) self.lines = col col.set_color(colors) self.ax.add_collection(col) def _ticker(self): ''' Return two sequences: ticks (colorbar data locations) and ticklabels (strings). ''' locator = self.locator formatter = self.formatter if locator is None: if self.boundaries is None: if isinstance(self.norm, colors.NoNorm): nv = len(self._values) base = 1 + int(nv/10) locator = ticker.IndexLocator(base=base, offset=0) elif isinstance(self.norm, colors.BoundaryNorm): b = self.norm.boundaries locator = ticker.FixedLocator(b, nbins=10) elif isinstance(self.norm, colors.LogNorm): locator = ticker.LogLocator() else: locator = ticker.MaxNLocator() else: b = self._boundaries[self._inside] locator = ticker.FixedLocator(b, nbins=10) if isinstance(self.norm, colors.NoNorm): intv = self._values[0], self._values[-1] else: intv = self.vmin, self.vmax locator.create_dummy_axis() formatter.create_dummy_axis() locator.set_view_interval(*intv) locator.set_data_interval(*intv) formatter.set_view_interval(*intv) formatter.set_data_interval(*intv) b = np.array(locator()) b, ticks = self._locate(b) formatter.set_locs(b) ticklabels = [formatter(t, i) for i, t in enumerate(b)] offset_string = formatter.get_offset() return ticks, ticklabels, offset_string def _process_values(self, b=None): ''' Set the :attr:`_boundaries` and :attr:`_values` attributes based on the input boundaries and values. Input boundaries can be *self.boundaries* or the argument *b*. 
''' if b is None: b = self.boundaries if b is not None: self._boundaries = np.asarray(b, dtype=float) if self.values is None: self._values = 0.5*(self._boundaries[:-1] + self._boundaries[1:]) if isinstance(self.norm, colors.NoNorm): self._values = (self._values + 0.00001).astype(np.int16) return self._values = np.array(self.values) return if self.values is not None: self._values = np.array(self.values) if self.boundaries is None: b = np.zeros(len(self.values)+1, 'd') b[1:-1] = 0.5*(self._values[:-1] - self._values[1:]) b[0] = 2.0*b[1] - b[2] b[-1] = 2.0*b[-2] - b[-3] self._boundaries = b return self._boundaries = np.array(self.boundaries) return # Neither boundaries nor values are specified; # make reasonable ones based on cmap and norm. if isinstance(self.norm, colors.NoNorm): b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5 v = np.zeros((len(b)-1,), dtype=np.int16) v[self._inside] = np.arange(self.cmap.N, dtype=np.int16) if self.extend in ('both', 'min'): v[0] = -1 if self.extend in ('both', 'max'): v[-1] = self.cmap.N self._boundaries = b self._values = v return elif isinstance(self.norm, colors.BoundaryNorm): b = list(self.norm.boundaries) if self.extend in ('both', 'min'): b = [b[0]-1] + b if self.extend in ('both', 'max'): b = b + [b[-1] + 1] b = np.array(b) v = np.zeros((len(b)-1,), dtype=float) bi = self.norm.boundaries v[self._inside] = 0.5*(bi[:-1] + bi[1:]) if self.extend in ('both', 'min'): v[0] = b[0] - 1 if self.extend in ('both', 'max'): v[-1] = b[-1] + 1 self._boundaries = b self._values = v return else: if not self.norm.scaled(): self.norm.vmin = 0 self.norm.vmax = 1 b = self.norm.inverse(self._uniform_y(self.cmap.N+1)) if self.extend in ('both', 'min'): b[0] = b[0] - 1 if self.extend in ('both', 'max'): b[-1] = b[-1] + 1 self._process_values(b) def _find_range(self): ''' Set :attr:`vmin` and :attr:`vmax` attributes to the first and last boundary excluding extended end boundaries. ''' b = self._boundaries[self._inside] self.vmin = b[0] self.vmax = b[-1] def _central_N(self): '''number of boundaries **before** extension of ends''' nb = len(self._boundaries) if self.extend == 'both': nb -= 2 elif self.extend in ('min', 'max'): nb -= 1 return nb def _extended_N(self): ''' Based on the colormap and extend variable, return the number of boundaries. ''' N = self.cmap.N + 1 if self.extend == 'both': N += 2 elif self.extend in ('min', 'max'): N += 1 return N def _uniform_y(self, N): ''' Return colorbar data coordinates for *N* uniformly spaced boundaries, plus ends if required. ''' if self.extend == 'neither': y = np.linspace(0, 1, N) else: if self.extend == 'both': y = np.zeros(N + 2, 'd') y[0] = -0.05 y[-1] = 1.05 elif self.extend == 'min': y = np.zeros(N + 1, 'd') y[0] = -0.05 else: y = np.zeros(N + 1, 'd') y[-1] = 1.05 y[self._inside] = np.linspace(0, 1, N) return y def _proportional_y(self): ''' Return colorbar data coordinates for the boundaries of a proportional colorbar. ''' if isinstance(self.norm, colors.BoundaryNorm): b = self._boundaries[self._inside] y = (self._boundaries - self._boundaries[0]) y = y / (self._boundaries[-1] - self._boundaries[0]) else: y = self.norm(self._boundaries.copy()) if self.extend in ('both', 'min'): y[0] = -0.05 if self.extend in ('both', 'max'): y[-1] = 1.05 yi = y[self._inside] norm = colors.Normalize(yi[0], yi[-1]) y[self._inside] = norm(yi) return y def _mesh(self): ''' Return X,Y, the coordinate arrays for the colorbar pcolormesh. 
These are suitable for a vertical colorbar; swapping and transposition for a horizontal colorbar are done outside this function. ''' x = np.array([0.0, 1.0]) if self.spacing == 'uniform': y = self._uniform_y(self._central_N()) else: y = self._proportional_y() self._y = y X, Y = np.meshgrid(x,y) if self.extend in ('min', 'both'): X[0,:] = 0.5 if self.extend in ('max', 'both'): X[-1,:] = 0.5 return X, Y def _locate(self, x): ''' Given a possible set of color data values, return the ones within range, together with their corresponding colorbar data coordinates. ''' if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)): b = self._boundaries xn = x xout = x else: # Do calculations using normalized coordinates so # as to make the interpolation more accurate. b = self.norm(self._boundaries, clip=False).filled() # We do our own clipping so that we can allow a tiny # bit of slop in the end point ticks to allow for # floating point errors. xn = self.norm(x, clip=False).filled() in_cond = (xn > -0.001) & (xn < 1.001) xn = np.compress(in_cond, xn) xout = np.compress(in_cond, x) # The rest is linear interpolation with clipping. y = self._y N = len(b) ii = np.minimum(np.searchsorted(b, xn), N-1) i0 = np.maximum(ii - 1, 0) #db = b[ii] - b[i0] db = np.take(b, ii) - np.take(b, i0) db = np.where(i0==ii, 1.0, db) #dy = y[ii] - y[i0] dy = np.take(y, ii) - np.take(y, i0) z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db return xout, z def set_alpha(self, alpha): self.alpha = alpha class Colorbar(ColorbarBase): def __init__(self, ax, mappable, **kw): mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax # are set when colorbar is called, # even if mappable.draw has not yet # been called. This will not change # vmin, vmax if they are already set. self.mappable = mappable kw['cmap'] = mappable.cmap kw['norm'] = mappable.norm kw['alpha'] = mappable.get_alpha() if isinstance(mappable, contour.ContourSet): CS = mappable kw['boundaries'] = CS._levels kw['values'] = CS.cvalues kw['extend'] = CS.extend #kw['ticks'] = CS._levels kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10)) kw['filled'] = CS.filled ColorbarBase.__init__(self, ax, **kw) if not CS.filled: self.add_lines(CS) else: ColorbarBase.__init__(self, ax, **kw) def add_lines(self, CS): ''' Add the lines from a non-filled :class:`~matplotlib.contour.ContourSet` to the colorbar. ''' if not isinstance(CS, contour.ContourSet) or CS.filled: raise ValueError('add_lines is only for a ContourSet of lines') tcolors = [c[0] for c in CS.tcolors] tlinewidths = [t[0] for t in CS.tlinewidths] # The following was an attempt to get the colorbar lines # to follow subsequent changes in the contour lines, # but more work is needed: specifically, a careful # look at event sequences, and at how # to make one object track another automatically. #tcolors = [col.get_colors()[0] for col in CS.collections] #tlinewidths = [col.get_linewidth()[0] for lw in CS.collections] #print 'tlinewidths:', tlinewidths ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths) def update_bruteforce(self, mappable): ''' Manually change any contour line colors. This is called when the image or contour plot to which this colorbar belongs is changed. ''' # We are using an ugly brute-force method: clearing and # redrawing the whole thing. The problem is that if any # properties have been changed by methods other than the # colorbar methods, those changes will be lost. 
self.ax.cla() self.draw_all() #if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax: # self.ax.cla() # self.draw_all() if isinstance(self.mappable, contour.ContourSet): CS = self.mappable if not CS.filled: self.add_lines(CS) #if self.lines is not None: # tcolors = [c[0] for c in CS.tcolors] # self.lines.set_color(tcolors) #Fixme? Recalculate boundaries, ticks if vmin, vmax have changed. #Fixme: Some refactoring may be needed; we should not # be recalculating everything if there was a simple alpha # change. def make_axes(parent, **kw): orientation = kw.setdefault('orientation', 'vertical') fraction = kw.pop('fraction', 0.15) shrink = kw.pop('shrink', 1.0) aspect = kw.pop('aspect', 20) #pb = transforms.PBox(parent.get_position()) pb = parent.get_position(original=True).frozen() if orientation == 'vertical': pad = kw.pop('pad', 0.05) x1 = 1.0-fraction pb1, pbx, pbcb = pb.splitx(x1-pad, x1) pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb) anchor = (0.0, 0.5) panchor = (1.0, 0.5) else: pad = kw.pop('pad', 0.15) pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad) pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb) aspect = 1.0/aspect anchor = (0.5, 1.0) panchor = (0.5, 0.0) parent.set_position(pb1) parent.set_anchor(panchor) fig = parent.get_figure() cax = fig.add_axes(pbcb) cax.set_aspect(aspect, anchor=anchor, adjustable='box') return cax, kw make_axes.__doc__ =''' Resize and reposition a parent axes, and return a child axes suitable for a colorbar:: cax, kw = make_axes(parent, **kw) Keyword arguments may include the following (with defaults): *orientation* 'vertical' or 'horizontal' %s All but the first of these are stripped from the input kw set. Returns (cax, kw), the child axes and the reduced kw dictionary. ''' % make_axes_kw_doc
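# --- Editor's addition: hedged usage sketch, not part of matplotlib ---
# As the ColorbarBase docstring above says, a standalone colorbar can be drawn
# without any mappable.  Kept as a comment so importing this module has no
# side effects; ``cm`` and ``colors`` are already imported here, ``plt`` is not:
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure(figsize=(1.5, 4))
#   cax = fig.add_axes([0.3, 0.05, 0.3, 0.9])
#   cb = ColorbarBase(cax, cmap=cm.jet,
#                     norm=colors.Normalize(vmin=0, vmax=10),
#                     orientation='vertical')
#   cb.set_label('value')
#   plt.show()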
agpl-3.0
sandeepgupta2k4/tensorflow
tensorflow/examples/learn/iris_val_based_early_stopping.py
62
2827
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import shutil

from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf

learn = tf.contrib.learn


def clean_folder(folder):
  """Cleans the given folder if it exists."""
  try:
    shutil.rmtree(folder)
  except OSError:
    pass


def main(unused_argv):
  iris = datasets.load_iris()
  x_train, x_test, y_train, y_test = train_test_split(
      iris.data, iris.target, test_size=0.2, random_state=42)

  x_train, x_val, y_train, y_val = train_test_split(
      x_train, y_train, test_size=0.2, random_state=42)
  val_monitor = learn.monitors.ValidationMonitor(
      x_val, y_val, early_stopping_rounds=200)

  model_dir = '/tmp/iris_model'
  clean_folder(model_dir)

  # classifier with early stopping on training data
  classifier1 = learn.DNNClassifier(
      feature_columns=learn.infer_real_valued_columns_from_input(x_train),
      hidden_units=[10, 20, 10],
      n_classes=3,
      model_dir=model_dir)
  classifier1.fit(x=x_train, y=y_train, steps=2000)
  predictions1 = list(classifier1.predict(x_test, as_iterable=True))
  score1 = metrics.accuracy_score(y_test, predictions1)

  model_dir = '/tmp/iris_model_val'
  clean_folder(model_dir)

  # classifier with early stopping on validation data, save frequently for
  # monitor to pick up new checkpoints.
  classifier2 = learn.DNNClassifier(
      feature_columns=learn.infer_real_valued_columns_from_input(x_train),
      hidden_units=[10, 20, 10],
      n_classes=3,
      model_dir=model_dir,
      config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
  classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
  predictions2 = list(classifier2.predict(x_test, as_iterable=True))
  score2 = metrics.accuracy_score(y_test, predictions2)

  # In many applications, the score is improved by using early stopping
  print('score1: ', score1)
  print('score2: ', score2)
  print('score2 > score1: ', score2 > score1)


if __name__ == '__main__':
  tf.app.run()
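# --- Editor's addition: hedged illustration, not part of the TensorFlow example.
# ValidationMonitor stops training once the validation metric has not improved
# for `early_stopping_rounds` consecutive evaluations.  The standalone helper
# below sketches that rule in plain Python; the loss values in the comment are
# made up.


def _stops_early(val_losses, early_stopping_rounds):
  """Return the evaluation index at which training would stop, or None."""
  best = float('inf')
  best_step = 0
  for step, loss in enumerate(val_losses):
    if loss < best:
      best, best_step = loss, step
    elif step - best_step >= early_stopping_rounds:
      return step
  return None

# _stops_early([0.9, 0.7, 0.6, 0.61, 0.62, 0.63], 2) -> 4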
apache-2.0
nikhilgahlawat/ThinkStats2
code/populations.py
68
2609
"""This file contains code used in "Think Stats", by Allen B. Downey, available from greenteapress.com Copyright 2010 Allen B. Downey License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html """ from __future__ import print_function import csv import logging import sys import numpy as np import pandas import thinkplot import thinkstats2 def ReadData(filename='PEP_2012_PEPANNRES_with_ann.csv'): """Reads filename and returns populations in thousands filename: string returns: pandas Series of populations in thousands """ df = pandas.read_csv(filename, header=None, skiprows=2, encoding='iso-8859-1') populations = df[7] populations.replace(0, np.nan, inplace=True) return populations.dropna() def MakeFigures(): """Plots the CDF of populations in several forms. On a log-log scale the tail of the CCDF looks like a straight line, which suggests a Pareto distribution, but that turns out to be misleading. On a log-x scale the distribution has the characteristic sigmoid of a lognormal distribution. The normal probability plot of log(sizes) confirms that the data fit the lognormal model very well. Many phenomena that have been described with Pareto models can be described as well, or better, with lognormal models. """ pops = ReadData() print('Number of cities/towns', len(pops)) log_pops = np.log10(pops) cdf = thinkstats2.Cdf(pops, label='data') cdf_log = thinkstats2.Cdf(log_pops, label='data') # pareto plot xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7) thinkplot.Plot(np.log10(xs), 1-ys, label='model', color='0.8') thinkplot.Cdf(cdf_log, complement=True) thinkplot.Config(xlabel='log10 population', ylabel='CCDF', yscale='log') thinkplot.Save(root='populations_pareto') # lognormal plot thinkplot.PrePlot(cols=2) mu, sigma = log_pops.mean(), log_pops.std() xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8) thinkplot.Plot(xs, ps, label='model', color='0.8') thinkplot.Cdf(cdf_log) thinkplot.Config(xlabel='log10 population', ylabel='CDF') thinkplot.SubPlot(2) thinkstats2.NormalProbabilityPlot(log_pops, label='data') thinkplot.Config(xlabel='z', ylabel='log10 population', xlim=[-5, 5]) thinkplot.Save(root='populations_normal') def main(): thinkstats2.RandomSeed(17) MakeFigures() if __name__ == "__main__": main()
gpl-3.0
uberdugo/mlia
Ch05/EXTRAS/plot2D.py
7
1276
'''
Created on Oct 6, 2010

@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import logRegres

dataMat,labelMat=logRegres.loadDataSet()
dataArr = array(dataMat)
weights = logRegres.stocGradAscent0(dataArr,labelMat)

n = shape(dataArr)[0] #number of points to create
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []

markers =[]
colors =[]

for i in range(n):
    if int(labelMat[i])== 1:
        xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
    else:
        xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])

fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(xcord,ycord, c=colors, s=markers)
type1 = ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
type2 = ax.scatter(xcord2, ycord2, s=30, c='green')
x = arange(-3.0, 3.0, 0.1)
#weights = [-2.9, 0.72, 1.29]
#weights = [-5, 1.09, 1.42]
weights = [13.03822793, 1.32877317, -1.96702074]
weights = [4.12, 0.48, -0.6168]
y = (-weights[0]-weights[1]*x)/weights[2]
type3 = ax.plot(x, y)
#ax.legend([type1, type2, type3], ["Did Not Like", "Liked in Small Doses", "Liked in Large Doses"], loc=2)
#ax.axis([-5000,100000,-2,25])
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
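# --- Editor's addition: hedged note, not part of the book's code ---
# The plotted line is the logistic-regression decision boundary: with inputs
# (1, x1, x2) the boundary is where w0 + w1*x1 + w2*x2 = 0, i.e.
#     x2 = (-w0 - w1*x1) / w2,
# which is exactly the expression used for y above.  Sanity check with the
# final weights [4.12, 0.48, -0.6168]: at x1 = 0, x2 = -4.12 / -0.6168,
# about 6.7, so the boundary should cross the x2-axis near 6.7.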
gpl-3.0
LarsDu/DeepNuc
deepnuc/nucbinaryclassifier.py
2
15464
import tensorflow as tf import numpy as np import sklearn.metrics as metrics #from databatcher import DataBatcher import nucconvmodel #import dubiotools as dbt import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import pprint from itertools import cycle import os import sys #Logging imports from logger import Logger from nucinference import NucInference from collections import OrderedDict class NucBinaryClassifier(NucInference): use_onehot_labels = True def __init__(self, sess, train_batcher, test_batcher, num_epochs, learning_rate, batch_size, seq_len, save_dir, keep_prob=0.5, beta1=0.9, concat_revcom_input=False, nn_method_key="inferenceA", pos_index=1): """NucBinaryClassifier encapsulates training and data evaluation for :param sess: tf.Session() object :param train_batcher: DataBatcher object for training set :param test_batcher: DataBatcher object for test set :param num_epochs: Number of epoch cycles to perform training :param learning_rate: Learning rate :param batch_size: Mini-batch pull size :param seq_len: Sequence length :param save_dir: Root save directory for binary classification model :param keep_prob: Probability of keeping weight for dropout regularization :param beta1: Beta1 parameter for AdamOptimizer :param concat_revcom_input: If true, concatenate reverse complement of nucleotide sequence to input vector :param nn_method_key: Dictionary key for inference method found in nucconvmodels.py file. Determines which model to use. Example: "inferenceA" will run nucconvmodels.inferenceA :param pos_index: The index to use for the positive class (defaults to 1) :returns: a NucBinaryClassifier object :rtype: NucBinaryClassifier """ super(NucBinaryClassifier, self).__init__(sess, train_batcher, test_batcher, num_epochs, learning_rate, batch_size, seq_len, save_dir, keep_prob, beta1, concat_revcom_input, nn_method_key="inferenceA") if self.train_batcher.num_classes != 2: print "Error, more than two classes detected in train batcher" else: self.num_classes = 2 #The index for the label that should be considered the positive class self.pos_index=pos_index self.save_on_epoch = 5 def build_model(self): self.dna_seq_placeholder = tf.placeholder(tf.float32, shape=[None,self.seq_len,4], name="dna_seq") self.labels_placeholder = tf.placeholder(tf.float32, shape=[None, self.num_classes], name="labels") self.keep_prob_placeholder = tf.placeholder(tf.float32,name="keep_prob") self.logits, self.network = self.nn_method(self.dna_seq_placeholder, self.keep_prob_placeholder, self.num_classes) self.probs = tf.nn.softmax(self.logits) self.loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=self.logits)) ''' Calculate metrics. num_true positives is the number of true positives for the current batch Table below shows index if tf.argmax is applied +-----+-----------+---------+ | | Classifier| Label | +-----+-----------+---------+ | TP | 1 | 1 | +-----+-----------+---------+ | FP | 1 | 0 | +-----+-----------+---------+ | TN | 0 | 0 | +-----+-----------+---------+ | FN | 0 | 1 | +-----+-----------+---------+ Precision = TP/(TP+FP) Recall = TP/(TP+FN) F1-score = 2*(Prec*Rec)/(Prec+Rec) # Note: I ended up not using the tp,fp,tn,fn ops because I ended up calculating # these metrics using sklearn. ''' #correct = TN+TP #Used for calculating accuracy self.logits_ind = tf.argmax(self.logits,1) self.labels_ind = tf.argmax(self.labels_placeholder,1) #Create max_mask of logits (ie: [-.5,.5] --> [0 1]. 
Note logits have # shape [batch_size * num_classes= 2] #self.inverse_logits_col = tf.ones_like(self.logits_ind) - self.logits_ind #self.max_mask_logits = tf.concat([self.inverse_logits_col,self.logits_ind],1) #True positives where logits_ind+labels_ind == 2 #True negatives where logits_ind+labels_ind == 0 self.sum_ind = tf.add(self.logits_ind,self.labels_ind) self.true_positives = tf.equal(self.sum_ind,2*tf.ones_like(self.sum_ind)) #bool self.num_true_positives =tf.reduce_sum(tf.cast(self.true_positives, tf.int32)) #For FP classifier index > label index self.false_positives=tf.greater(self.logits_ind,self.labels_ind) self.num_false_positives = tf.reduce_sum(tf.cast(self.false_positives, tf.int32)) self.true_negatives = tf.equal(self.sum_ind,tf.zeros_like(self.sum_ind)) #bool self.num_true_negatives= tf.reduce_sum(tf.cast(self.true_negatives,tf.int32)) #For FN classifier index < label index self.false_negatives=tf.less(self.logits_ind,self.labels_ind) self.num_false_negatives = tf.reduce_sum(tf.cast(self.false_negatives,tf.int32)) #num correct can be used to calculate accuracy self.correct = tf.equal(self.logits_ind,self.labels_ind) self.num_correct= tf.reduce_sum(tf.cast(self.correct, tf.int32)) self.relevance =self.network.relevance_backprop(tf.multiply(self.logits, self.labels_placeholder)) '''Write and consolidate summaries''' self.loss_summary = tf.summary.scalar('loss',self.loss) self.summary_writer = tf.summary.FileWriter(self.summary_dir,self.sess.graph) self.summary_op = tf.summary.merge([self.loss_summary]) #Note: Do not use tf.summary.merge_all() here. This will break encapsulation for # cross validation and lead to crashes when training multiple models # Add gradient ops to graph with learning rate self.train_op = tf.train.AdamOptimizer(self.learning_rate, beta1=self.beta1).minimize(self.loss) self.vars = tf.trainable_variables() self.var_names = [var.name for var in self.vars] #print "Trainable variables:\n" #for vname in self.var_names: # print vname self.saver = tf.train.Saver() self.init_op = tf.global_variables_initializer() #Important note: Restoring model does not require init_op. #In fact calling tf.global_variables_initializer() after loading a model #will overwrite loaded weights self.sess.run(self.init_op) self.load(self.checkpoint_dir) def eval_model_metrics(self, batcher, save_plots=False, image_name ='metrics.png', eval_batch_size=50): """ Note: This method only works for binary classification as auPRC and auROC graphs only apply to binary classificaton problems. TODO: Modify this code to perform auROC generation for one-vs-all in the case of multiclass classification. 
""" #Ref: http://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics ##auROC calculations #Keep batch size at 1 for now to ensure 1 full epoch is evaluated all_labels = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32) all_probs = np.zeros((batcher.num_records,self.num_classes), dtype = np.float32) #num_correct = 0 #counts number of correct predictions num_whole_pulls = batcher.num_records//eval_batch_size num_single_pulls = batcher.num_records%eval_batch_size num_steps = num_whole_pulls+num_single_pulls for i in range(num_steps): if i<num_whole_pulls: batch_size=eval_batch_size else: batch_size=1 labels_batch, dna_seq_batch = batcher.pull_batch(batch_size) feed_dict = { self.dna_seq_placeholder:dna_seq_batch, self.labels_placeholder:labels_batch, self.keep_prob_placeholder:1.0 } cur_prob= self.sess.run(self.probs,feed_dict=feed_dict) #Fill labels array if batch_size > 1: start_ind = batch_size*i elif batch_size == 1: start_ind = num_whole_pulls*eval_batch_size+(i-num_whole_pulls) else: print "Never reach this condition" all_labels[start_ind:start_ind+batch_size,:] = labels_batch all_probs[start_ind:start_ind+batch_size,:] = cur_prob #Calculate metrics and save results in a dict md = self.calc_classifier_metrics(all_labels,all_probs) md["epoch"]=self.epoch md["step"]=self.step #print "Testing accuracy",float(num_correct)/float(batcher.num_records) print 'Num examples: %d Num correct: %d Accuracy: %0.04f' % \ (batcher.num_records, md["num_correct"], md["accuracy"])+'\n' if save_plots: ###Plot some metrics plot_colors = cycle(['cyan','blue','orange','teal']) #print "Labels shape",all_labels.shape #print "Probs shape",all_probs.shape #print "Preds shape",all_preds.shape #Generate auROC plot axes fig1,ax1 = plt.subplots(2) fig1.subplots_adjust(bottom=0.2) ax1[0].plot([0,1],[0,1],color='navy',lw=2,linestyle='--') ax1[0].set_xbound(0.0,1.0) ax1[0].set_ybound(0.0,1.05) ax1[0].set_xlabel('False Positive Rate') ax1[0].set_ylabel('True Positive Rate') ax1[0].set_title('auROC') #plt.legend(loc='lower right') ax1[0].plot(md["fpr"],md["tpr"],color=plot_colors.next(), lw=2,linestyle='-',label='auROC curve (area=%0.2f)' % md["auroc"] ) #Generate auPRC plot axes #ax1[1].plot([0,1],[1,1],color='royalblue',lw=2,linestyle='--') ax1[1].set_xlabel('Precision') ax1[1].set_ylabel('Recall') ax1[1].set_title('auPRC') ax1[1].plot(md["thresh_precision"],md["thresh_recall"],color=plot_colors.next(), lw=2,linestyle='-',label='auPRC curve (area=%0.2f)' % md["auprc"] ) ax1[1].set_xbound(0.0,1.0) ax1[1].set_ybound(0.0,1.05) #Note: avg prec score is the area under the prec recall curve #Note: Presumably class 1 (pos examples) should be the only f1 score we focus on #print "F1 score for class",i,"is",f1_score plt.tight_layout() plt_fname = self.save_dir+os.sep+image_name print "Saving auROC image to",plt_fname fig1.savefig(plt_fname) #Return metrics dictionary return md def calc_classifier_metrics(self,all_labels,all_probs): """Calculate some metrics for the dataset return dictionary with metrics :param all_probs: nx2 prob values :param all_labels: nx2 labels :returns: dictionary of metrics :rtype: dict() """ num_records = all_probs.shape[0] all_preds = np.zeros((num_records, self.num_classes),dtype = np.float32) all_preds[np.arange(num_records),all_probs.argmax(1)] = 1 #Calculate accuracy num_correct = metrics.accuracy_score(all_labels[:,self.pos_index],all_preds[:,self.pos_index],normalize=False) accuracy = num_correct/float(all_preds.shape[0]) ###Calculate auROC 
#http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html #metrics.roc_curve(y_true, y_score[, ...]) #y_score is probs fpr,tpr,_ = metrics.roc_curve(all_labels[:,self.pos_index], all_probs[:,self.pos_index], pos_label=self.pos_index) auroc = metrics.auc(fpr,tpr) thresh_precision,thresh_recall,prc_thresholds = metrics.precision_recall_curve( all_labels[:,self.pos_index], all_probs[:,self.pos_index]) #Calculate precision, recall, and f1-score for threshold = 0.5 #confusion_matrix = metrics.confusion_matrix(all_labels[:,self.pos_index],all_probs[:,self.pos_index]) precision, recall, f1_score, support = metrics.precision_recall_fscore_support( all_labels[:,self.pos_index], all_preds[:,self.pos_index], pos_label=self.pos_index) precision = precision[self.pos_index] recall = recall[self.pos_index] f1_score = f1_score[self.pos_index] support = support[self.pos_index] auprc = metrics.average_precision_score(all_labels[:,self.pos_index], all_probs[:,self.pos_index]) return OrderedDict([ ("num_correct",num_correct), ("accuracy",accuracy), ("auroc",auroc), ("auprc",auprc), ("fpr",fpr), ("tpr",tpr), ("precision",precision), ("recall",recall), ("f1_score",f1_score), ("support",support), ("thresh_precision",thresh_precision), ("thresh_recall",thresh_recall), ("prc_thresholds",prc_thresholds) ])
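# --- Editor's addition: hedged illustration, not part of DeepNuc ---
# calc_classifier_metrics above relies on sklearn.metrics for auROC/auPRC.
# A tiny standalone example of those calls on made-up labels and
# probabilities, kept commented out so importing this module is unchanged
# (it uses only the sklearn/numpy imports already present in this file):
#
#   toy_labels = np.array([0, 0, 1, 1])
#   toy_probs = np.array([0.1, 0.4, 0.35, 0.8])
#   fpr, tpr, _ = metrics.roc_curve(toy_labels, toy_probs, pos_label=1)
#   print metrics.auc(fpr, tpr)                                    # 0.75
#   print metrics.average_precision_score(toy_labels, toy_probs)   # ~0.83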
gpl-3.0
ssh0/growing-string
triangular_lattice/diecutting/result_count_on_edge.py
1
9360
#!/usr/bin/env python # -*- coding:utf-8 -*- # # written by Shotaro Fujimoto # 2016-12-16 import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d.axes3d import Axes3D import matplotlib.cm as cm import numpy as np import set_data_path class Visualizer(object): def __init__(self, subjects): self.data_path_list = set_data_path.data_path if len(subjects) != 0: for subject in subjects: getattr(self, 'result_' + subject)() def load_data(self, _path): data = np.load(_path) beta = data['beta'] try: size_dist_ave = data['size_dist_ave'] if len(size_dist_ave) == 0: raise KeyError return self.load_data_averaged(_path) except KeyError: pass num_of_strings = data['num_of_strings'] frames = data['frames'] Ls = data['Ls'].astype(np.float) # Ls = (3 * Ls * (Ls + 1) + 1) size_dist = data['size_dist'] N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings n0 = N0[1:] S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings n1 = (S[1:] - n0) * 2. N = [] for l in size_dist: dot = np.dot(np.arange(len(l)), np.array(l).T) N.append(dot) # N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist]) N_all = 3. * Ls * (Ls + 1.) + 1 N = np.array(N, dtype=np.float) / num_of_strings N_minus = N_all - N N_minus_rate = N_minus / N_all n_minus = N_minus[1:] - N_minus[:-1] n1_ave = n1 / np.sum(n1) n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus) self.beta = beta self.num_of_strings = num_of_strings self.frames = frames self.Ls = Ls self.N = N self.N_minus = N_minus self.N_minus_rate = N_minus_rate self.S = S self.n0 = n0 self.n1 = n1 self.n2 = n2 self.n_minus = n_minus self.n1_ave = n1_ave def load_data_averaged(self, _path): data = np.load(_path) beta = data['beta'] num_of_strings = data['num_of_strings'] frames = data['frames'] Ls = data['Ls'].astype(np.float) # Ls = (3 * Ls * (Ls + 1) + 1) # size_dist = data['size_dist'] size_dist_ave = data['size_dist_ave'] N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float) n0 = N0[1:] S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float) n1 = (S[1:] - n0) * 2. N = [] for l in size_dist_ave: dot = np.dot(np.arange(len(l)), np.array(l).T) N.append(dot) # N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave]) N_all = 3. * Ls * (Ls + 1.) 
+ 1 N = np.array(N, dtype=np.float) N_minus = N_all - N N_minus_rate = N_minus / N_all n_minus = N_minus[1:] - N_minus[:-1] n1_ave = n1 / np.sum(n1) n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus) self.beta = beta self.num_of_strings = num_of_strings self.frames = frames self.Ls = Ls self.N = N self.N_all = N_all self.N_minus = N_minus self.N_minus_rate = N_minus_rate self.S = S self.n_all = 6 * Ls[1:] self.n0 = n0 self.n1 = n1 self.n2 = n2 self.n_minus = n_minus self.n1_ave = n1_ave def result_N(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.N[1:], '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('Occupied points in the cutting region' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$N$') plt.show() def result_N_minus_rate(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.N_minus_rate[1:], '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('The rate of not occupied site in all N' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$N_{-1} / N_{\mathrm{all}}$') plt.show() def result_n0(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.n0, '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('Averaged number of the sites which is the only member of \ a subcluster on the cutting edges.' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$n_{0}$') plt.show() def result_n1(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.n1, '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('Averaged number of the sites which is connected to a \ existing subcluster on the cutting edges.' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$n_{1}$') plt.show() def result_n2(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.n2, '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('Averaged number of the sites on the cutting edges which \ is connected to two neighbors.' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$n_{2}$') plt.show() def result_n_minus(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.n_minus, '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_title('Averaged number of the sites which is not occupied on \ the cutting edges.' 
+ ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$n_{-1}$') plt.show() def result_S(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_ylim([0, ax.get_ylim()[1]]) ax.set_title('Averaged number of the subclusters in the cutted region.' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$S$') plt.show() def result_S_rate(self): fig, ax = plt.subplots() for i, result_data_path in enumerate(self.data_path_list): self.load_data(result_data_path) # ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.', # ax.plot(self.Ls[1:], self.S[1:] / self.n_all, '.', ax.plot(self.Ls[1:], self.S[1:] / self.N[1:], '.', label=r'$\beta = %2.2f$' % self.beta, color=cm.viridis(float(i) / len(self.data_path_list))) ax.legend(loc='best') ax.set_ylim([0, ax.get_ylim()[1]]) ax.set_title('Averaged number of the subclusters in the cutted region' + ' (normalized)' + ' (sample: {})'.format(self.num_of_strings)) ax.set_xlabel(r'Cutting size $L$') ax.set_ylabel(r'$S$') plt.show() if __name__ == '__main__': # subject: 'N', 'N_minus_rate', 'n0', 'n1', 'n2', 'n_minus', 'S' main = Visualizer( [ # 'N', # 'N_minus_rate', # 'n0', # 'n1', # 'n2', # 'n_minus', 'S', # 'S_rate' ] )
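# --- Editor's addition: hedged note, not part of the original analysis ---
# The normalisation used above, N_all = 3*L*(L + 1) + 1, is consistent with
# counting the sites of a hexagonal patch of "radius" L on a triangular
# lattice: one centre site plus 6*k sites on ring k, so
#     1 + sum_{k=1}^{L} 6*k = 1 + 3*L*(L + 1).
# Likewise the n_all = 6*L used above is the number of sites on the
# outermost ring, i.e. on the cutting edge itself.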
mit