Dataset schema: each row pairs a truncated Python source file ("prompt") with the code that continues it ("completion") and the fully-qualified API that the completion calls ("api"). All three columns are strings.

  prompt      string, 123 to 92.3k characters
  completion  string, 7 to 132 characters
  api         string, 9 to 35 characters
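For orientation, a minimal sketch of reading one row with the Hugging Face datasets library. The repository id "user/py-api-completions" is a hypothetical placeholder, since the card above does not name the dataset:

from datasets import load_dataset

# Hypothetical repository id; substitute the real one.
ds = load_dataset("user/py-api-completions", split="train")
row = ds[0]
print(row["prompt"][-80:])   # tail of the truncated source file
print(row["completion"])     # the code that continues it
print(row["api"])            # the fully-qualified API it calls

The first row's prompt, a truncated copy of scikit-learn's model-selection test suite, follows.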
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = 
len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Test the IID parameter TODO: Clearly this test does something else??? # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. 
/ 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=
completion: np.logspace(-4, 1, 3)
api: numpy.logspace
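To make the target concrete, a quick check of what the completed expression evaluates to (plain NumPy, nothing beyond the call the row labels):

import numpy as np

# Three points spaced evenly on a log10 scale from 10**-4 to 10**1,
# i.e. an exponentially spaced grid of C values for the param dict
# that the prompt breaks off inside.
C_grid = np.logspace(-4, 1, 3)
print(C_grid)  # [1.00000000e-04 3.16227766e-02 1.00000000e+01]

The second row's prompt, a flattened AdvEMDpy figure-generation script, follows.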
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # NaN placeholder; avoids the divide-by-zero warning of np.zeros_like(...) / 0
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101),
np.linspace(-5.5, 5.5, 101)
numpy.linspace
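For reference, each row above pairs a truncated prompt with the completion and API it targets. A minimal, self-contained sketch (not part of the dataset) of the np.linspace semantics being completed in this row:

import numpy as np

# np.linspace(start, stop, num) returns `num` evenly spaced samples with both
# endpoints included, so the step is (stop - start) / (num - 1).
samples = np.linspace(-5.5, 5.5, 101)
assert samples[0] == -5.5 and samples[-1] == 5.5
assert np.isclose(samples[1] - samples[0], 0.11)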
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm

__author__ = "<NAME>"


def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
    ''' Estimate the collision condition. '''
    (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
    delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
    if -1e-6 < delta_angle_2 < 1e-6:
        delta_angle_2 = 1e-6
    delta_v1_list = []
    delta_v2_list = []
    # Estimate the collision condition (delta-v) according to the principal impact direction.
    for veh_striking in veh_striking_list:
        if veh_striking[0] == 1:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
            veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
            veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
            if (veh_striking[1]+1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2]+1) in [16, 1, 2, 3, 17, 20, 21]:
                veh_e = 2 / veh_RDS
            else:
                veh_e = 0.5 / veh_RDS
        elif veh_striking[0] == 2:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
            veh_a1 = np.abs(
np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)
numpy.sqrt
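The prompt above projects the centre-of-gravity offset onto the principal impact direction through a sqrt/arctan/cos chain. A hedged, self-contained check of the underlying trigonometric identity (the values of f, s, and theta here are illustrative, not from the paper):

import numpy as np

# sqrt(f**2 + s**2) * cos(arctan(f / s) + theta) equals the projection
# s * cos(theta) - f * sin(theta) of the offset vector (f, s) onto the rotated axis.
f, s, theta = 0.4, 1.2, 0.3  # hypothetical CG offsets and impact angle
lhs = np.sqrt(f ** 2 + s ** 2) * np.cos(np.arctan(f / s) + theta)
rhs = s * np.cos(theta) - f * np.sin(theta)
assert np.isclose(lhs, rhs)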
from os import listdir
from os.path import isfile, join
from path import Path
import numpy as np
import cv2

# Dataset path
target_path = Path('target/')
annotation_images_path = Path('dataset/ade20k/annotations/training/').abspath()
dataset = [f for f in listdir(annotation_images_path) if isfile(join(annotation_images_path, f))]
images = np.empty(len(dataset), dtype=object)
count = 1

# Iterate over all training images
for n in range(0, len(dataset)):
    # Read image
    images[n] = cv2.imread(join(annotation_images_path, dataset[n]))
    # Convert it to an array
    array = 
np.asarray(images[n],dtype=np.int8)
numpy.asarray
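One caveat worth noting for the row above: cv2.imread returns uint8 arrays with values in [0, 255], so casting ADE20k annotation IDs (150 classes) to np.int8 wraps every label above 127. A small sketch with made-up label values, as an aside rather than a change to the row's recorded completion, showing why a wider dtype is safer:

import numpy as np

# np.int8 holds only -128..127; class IDs above 127 wrap around.
labels_uint8 = np.array([0, 100, 150, 200], dtype=np.uint8)
wrapped = labels_uint8.astype(np.int8)   # 150 -> -106, 200 -> -56
safe = labels_uint8.astype(np.int32)     # values preserved
print(wrapped.tolist(), safe.tolist())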
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
postprocess.
"""
import os
import argparse
import numpy as np
from src.ms_utils import calculate_auc
from mindspore import context, load_checkpoint


def softmax(x):
    t_max = np.max(x, axis=1, keepdims=True)  # returns max of each row and keeps same dims
    e_x = np.exp(x - t_max)  # subtracts each row with its max value
    t_sum = np.sum(e_x, axis=1, keepdims=True)  # returns sum of each row and keeps same dims
    f_x = e_x / t_sum
    return f_x


def score_model(preds, test_pos, test_neg, weight, bias):
    """
    Score the model on the test set edges in each epoch.

    Args:
        preds: Predicted node representations.
        test_pos: Positive test edges.
        test_neg: Negative test edges.
        weight: Weight of the scoring layer.
        bias: Bias of the scoring layer.

    Returns:
        auc (Float32): AUC result.
        f1 (Float32): F1-Score result.
    """
    score_positive_edges = 
np.array(test_pos, dtype=np.int32)
numpy.array
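A quick, self-contained check of the row-wise softmax defined in the row above (same numerically stable pattern; the test values are illustrative):

import numpy as np

def softmax(x):
    # same stable pattern as above: shift each row by its max before exponentiating
    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))
    return e_x / np.sum(e_x, axis=1, keepdims=True)

p = softmax(np.array([[1000.0, 1001.0], [0.0, np.log(3.0)]]))
assert np.allclose(p.sum(axis=1), 1.0)   # every row is a proper distribution
assert np.allclose(p[1], [0.25, 0.75])   # exp(0) : exp(ln 3) = 1 : 3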
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt class TwoLayerNet(object): """ A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - softmax The outputs of the second fully-connected layer are the scores for each class. """ def __init__(self, input_size, hidden_size, output_size, std=1e-4): """ Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. """ self.params = {} self.params['W1'] = std * np.random.randn(input_size, hidden_size) self.params['b1'] = np.zeros(hidden_size) self.params['W2'] = std * np.random.randn(hidden_size, output_size) self.params['b2'] = np.zeros(output_size) def loss(self, X, y=None, reg=0.0): """ Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. """ # Unpack variables from the params dictionary W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] N, D = X.shape # Compute the forward pass scores = None ####################################################################### # TODO: Perform the forward pass, computing the class scores for the # # input. Store the result in the scores variable, which should be an # # array of shape (N, C). # ####################################################################### scores1 = X.dot(W1) + b1 # FC1 X2 = np.maximum(0, scores1) # ReLU FC1 scores = X2.dot(W2) + b2 # FC2 ####################################################################### # END OF YOUR CODE # ####################################################################### # If the targets are not given then jump out, we're done if y is None: return scores scores -= np.max(scores) # Fix Number instability scores_exp = np.exp(scores) probs = scores_exp /
np.sum(scores_exp, axis=1, keepdims=True)
numpy.sum
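From `probs`, the loss in this kind of network typically continues as the average cross-entropy of the correct classes plus the L2 term. A hedged sketch of that next step (the standard formulation; the exact regularization scaling in the original assignment may differ):

import numpy as np

def softmax_cross_entropy(probs, y, W1, W2, reg):
    # mean negative log-likelihood of the correct class per sample
    N = probs.shape[0]
    data_loss = -np.mean(np.log(probs[np.arange(N), y]))
    # L2 penalty on both weight matrices
    reg_loss = reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
    return data_loss + reg_loss

probs = np.array([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]])
y = np.array([0, 1])
loss = softmax_cross_entropy(probs, y, np.zeros((2, 2)), np.zeros((2, 3)), reg=0.0)
assert np.isclose(loss, -np.mean(np.log([0.7, 0.8])))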
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
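The symmetric and anti-symmetric reflections plotted above can be summarised generically; the sketch below illustrates the idea of mirroring a signal about its right endpoint, and is not AdvEMDpy's internal implementation:

import numpy as np

# Symmetric extension mirrors the samples; anti-symmetric extension is a
# point reflection through the endpoint (t[-1], x[-1]).
x = np.array([0.0, 1.0, 0.5, -0.2])
symmetric = np.concatenate([x, x[-2::-1]])                   # ends back at x[0]
anti_symmetric = np.concatenate([x, 2 * x[-1] - x[-2::-1]])  # continues the trend
assert symmetric[-1] == x[0]
assert np.isclose(anti_symmetric[4], 2 * x[-1] - x[-2])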
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) 
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50)) x_hs, y, z = hs_outputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3) ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3) ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3) ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi]) ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$']) plt.ylabel(r'Frequency (rad.s$^{-1}$)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/DFA_hilbert_spectrum.png') plt.show() # plot 6d time = np.linspace(0, 5 * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 51) fluc = Fluctuation(time=time, time_series=time_series) max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False) max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True) min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False) min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True) util = Utility(time=time, time_series=time_series) maxima = util.max_bool_func_1st_order_fd() minima = util.min_bool_func_1st_order_fd() ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50)) plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2) plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10) plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10) plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange') plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red') plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan') plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue') for knot in knots[:-1]: plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1) plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1) plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0,
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] inflection_bool = utils.inflection_point() inflection_x = time[inflection_bool] inflection_y = time_series[inflection_bool] fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series) maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0] inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0] binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0] derivative_of_lsq = utils.derivative_forward_diff() derivative_time = time[:-1] derivative_knots = np.linspace(knots[0], knots[-1], 31) # change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging) emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq) imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :] utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative) optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \ np.r_[utils.zero_crossing() == 1, False] optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \ np.r_[utils.zero_crossing() == 1, False] EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Detrended Fluctuation Analysis Examples') plt.plot(time, time_series, LineWidth=2, label='Time series') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10)) plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, 
label=textwrap.fill('Optimal minima', 10)) plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10)) plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10)) plt.plot(time, minima_envelope, c='darkblue') plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue') plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10)) plt.plot(time, minima_envelope_smooth, c='darkred') plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred') plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10)) plt.plot(time, EEMD_minima_envelope, c='darkgreen') plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen') plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10)) plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10)) plt.plot(time, np.cos(time), c='black', label='True mean') plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) plt.xlim(-0.25 * np.pi, 5.25 * np.pi) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/detrended_fluctuation_analysis.png') plt.show() # Duffing Equation Example def duffing_equation(xy, ts): gamma = 0.1 epsilon = 1 omega = ((2 * np.pi) / 25) return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)] t = np.linspace(0, 150, 1501) XY0 = [1, 1] solution = odeint(duffing_equation, XY0, t) x = solution[:, 0] dxdt = solution[:, 1] x_points = [0, 50, 100, 150] x_names = {0, 50, 100, 150} y_points_1 = [-2, 0, 2] y_points_2 = [-1, 0, 1] fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.2) axs[0].plot(t, x) axs[0].set_title('Duffing Equation Displacement') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, dxdt) axs[1].set_title('Duffing Equation Velocity') axs[1].set_ylim([-1.5, 1.5]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel('x(t)') ax.set_yticks(y_points_1) if axis == 1: ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $') ax.set(xlabel='t') ax.set_yticks(y_points_2) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation.png') plt.show() # compare other packages Duffing - top pyemd = pyemd0215() py_emd = pyemd(x) IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) 
plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png') plt.show() plt.show() emd_sift = emd040.sift.sift(x) IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert') freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100) hht = emd040.spectra.hilberthuang(IF, IA, freq_edges) hht = gaussian_filter(hht, sigma=1) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 1.0 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40)) plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht)))) plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15)) plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15)) plt.xticks([0, 50, 100, 150]) plt.yticks([0, 0.1, 0.2]) plt.ylabel('Frequency (Hz)') plt.xlabel('Time (s)') box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Duffing_equation_ht_emd.png') plt.show() # compare other packages Duffing - bottom emd_duffing = AdvEMDpy.EMD(time=t, time_series=x) emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False) fig, axs = plt.subplots(2, 1) plt.subplots_adjust(hspace=0.3) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy') axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10') axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3') axs[0].set_title('IMF 1') axs[0].set_ylim([-2, 2]) axs[0].set_xlim([0, 150]) axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy') print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}') axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10') print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}') axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3') print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}') axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$') axs[1].set_title('IMF 2') axs[1].set_ylim([-0.2, 0.4]) axs[1].set_xlim([0, 150]) axis = 0 for ax in axs.flat: ax.label_outer() if axis == 0: ax.set_ylabel(r'$\gamma_1(t)$') ax.set_yticks([-2, 0, 2]) if axis == 1: ax.set_ylabel(r'$\gamma_2(t)$') ax.set_yticks([-0.2, 0, 0.2]) box_0 = ax.get_position() ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) ax.set_xticks(x_points) ax.set_xticklabels(x_names) axis += 1 plt.savefig('jss_figures/Duffing_equation_imfs.png') plt.show() hs_ouputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False) ax = plt.subplot(111) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40)) x, y, z = hs_ouputs y = y / (2 * np.pi) z_min, z_max = 0,
np.abs(z)
numpy.abs
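A minimal, self-contained sketch of the numpy.abs call completed in the row above (illustrative only; the toy array is an invented stand-in for the Hilbert spectrum values z):

import numpy as np

z = np.array([[1.5, -2.0], [-0.5, 3.0]])  # stand-in for the spectrum values
z_min, z_max = 0, np.abs(z).max()  # element-wise magnitudes, then the largest one
print(z_min, z_max)  # 0 3.0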
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()  # dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()  # dict with key the index of each sentence and as value its embedding sentence_emb = dict()  # key: sentence, value: its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file where the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, length]) # np.append(features,length,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filename, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.append(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.mean(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(
np.zeros(768)
numpy.zeros
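A minimal sketch of the numpy.zeros fallback completed in the row above, which keeps every feature vector 768-dimensional when a sentence has no stored BERT embedding (illustrative only; bert_dict here is an invented stand-in and the sample sentences are made up):

import numpy as np

bert_dict = {'a known sentence': np.ones(768)}  # toy stand-in for the loaded embeddings
sentence_emb = []
for sentence in ['a known sentence', 'an unseen sentence']:
    # unseen sentences fall back to a zero vector of the same width
    sentence_emb.append(bert_dict.get(sentence, np.zeros(768)))
print(np.asarray(sentence_emb).shape)  # (2, 768)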
import os import string from collections import Counter from datetime import datetime from functools import partial from pathlib import Path from typing import Optional import numpy as np import pandas as pd from scipy.stats.stats import chisquare from tangled_up_in_unicode import block, block_abbr, category, category_long, script from pandas_profiling.config import Settings from pandas_profiling.model.summary_helpers_image import ( extract_exif, hash_image, is_image_truncated, open_image, ) def mad(arr: np.ndarray) -> np.ndarray: """Median Absolute Deviation: a "Robust" version of standard deviation. Indices variability of the sample. https://en.wikipedia.org/wiki/Median_absolute_deviation """ return np.median(np.abs(arr - np.median(arr))) def named_aggregate_summary(series: pd.Series, key: str) -> dict: summary = { f"max_{key}": np.max(series), f"mean_{key}": np.mean(series), f"median_{key}": np.median(series), f"min_{key}": np.min(series), } return summary def length_summary(series: pd.Series, summary: dict = None) -> dict: if summary is None: summary = {} length = series.str.len() summary.update({"length": length}) summary.update(named_aggregate_summary(length, "length")) return summary def file_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ # Transform stats = series.map(lambda x: os.stat(x)) def convert_datetime(x: float) -> str: return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S") # Transform some more summary = { "file_size": stats.map(lambda x: x.st_size), "file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime), "file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime), "file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime), } return summary def path_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ # TODO: optimize using value counts summary = { "common_prefix": os.path.commonprefix(series.values.tolist()) or "No common prefix", "stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(), "suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(), "name_counts": series.map(lambda x: os.path.basename(x)).value_counts(), "parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(), "anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(), } summary["n_stem_unique"] = len(summary["stem_counts"]) summary["n_suffix_unique"] = len(summary["suffix_counts"]) summary["n_name_unique"] = len(summary["name_counts"]) summary["n_parent_unique"] = len(summary["parent_counts"]) summary["n_anchor_unique"] = len(summary["anchor_counts"]) return summary def url_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ summary = { "scheme_counts": series.map(lambda x: x.scheme).value_counts(), "netloc_counts": series.map(lambda x: x.netloc).value_counts(), "path_counts": series.map(lambda x: x.path).value_counts(), "query_counts": series.map(lambda x: x.query).value_counts(), "fragment_counts": series.map(lambda x: x.fragment).value_counts(), } return summary def count_duplicate_hashes(image_descriptions: dict) -> int: """ Args: image_descriptions: Returns: """ counts = pd.Series( [x["hash"] for x in image_descriptions if "hash" in x] ).value_counts() return counts.sum() - len(counts) def extract_exif_series(image_exifs: list) -> dict: """ Args: image_exifs: Returns: """ exif_keys = [] exif_values: dict = {} for image_exif in image_exifs: # Extract key 
exif_keys.extend(list(image_exif.keys())) # Extract values per key for exif_key, exif_val in image_exif.items(): if exif_key not in exif_values: exif_values[exif_key] = [] exif_values[exif_key].append(exif_val) series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()} for k, v in exif_values.items(): series[k] = pd.Series(v).value_counts() return series def extract_image_information( path: Path, exif: bool = False, hash: bool = False ) -> dict: """Extracts all image information per file, as opening files is slow Args: path: Path to the image exif: extract exif information hash: calculate hash (for duplicate detection) Returns: A dict containing image information """ information: dict = {} image = open_image(path) information["opened"] = image is not None if image is not None: information["truncated"] = is_image_truncated(image) if not information["truncated"]: information["size"] = image.size if exif: information["exif"] = extract_exif(image) if hash: information["hash"] = hash_image(image) return information def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict: """ Args: series: series to summarize exif: extract exif information hash: calculate hash (for duplicate detection) Returns: """ image_information = series.apply( partial(extract_image_information, exif=exif, hash=hash) ) summary = { "n_truncated": sum( [1 for x in image_information if "truncated" in x and x["truncated"]] ), "image_dimensions": pd.Series( [x["size"] for x in image_information if "size" in x], name="image_dimensions", ), } image_widths = summary["image_dimensions"].map(lambda x: x[0]) summary.update(named_aggregate_summary(image_widths, "width")) image_heights = summary["image_dimensions"].map(lambda x: x[1]) summary.update(named_aggregate_summary(image_heights, "height")) image_areas = image_widths * image_heights summary.update(named_aggregate_summary(image_areas, "area")) if hash: summary["n_duplicate_hash"] = count_duplicate_hashes(image_information) if exif: exif_series = extract_exif_series( [x["exif"] for x in image_information if "exif" in x] ) summary["exif_keys_counts"] = exif_series["exif_keys"] summary["exif_data"] = exif_series return summary def get_character_counts(series: pd.Series) -> Counter: """Function to return the character counts Args: series: the Series to process Returns: A dict with character counts """ return Counter(series.str.cat()) def counter_to_series(counter: Counter) -> pd.Series: if not counter: return pd.Series([], dtype=object) counter_as_tuples = counter.most_common() items, counts = zip(*counter_as_tuples) return pd.Series(counts, index=items) def unicode_summary(series: pd.Series) -> dict: # Unicode Character Summaries (category and script name) character_counts = get_character_counts(series) character_counts_series = counter_to_series(character_counts) char_to_block = {key: block(key) for key in character_counts.keys()} char_to_category_short = {key: category(key) for key in character_counts.keys()} char_to_script = {key: script(key) for key in character_counts.keys()} summary = { "n_characters": len(character_counts_series), "character_counts": character_counts_series, "category_alias_values": { key: category_long(value) for key, value in char_to_category_short.items() }, "block_alias_values": { key: block_abbr(value) for key, value in char_to_block.items() }, } # Retrieve original distribution block_alias_counts: Counter = Counter() per_block_char_counts: dict = { k: Counter() for k in 
summary["block_alias_values"].values() } for char, n_char in character_counts.items(): block_name = summary["block_alias_values"][char] block_alias_counts[block_name] += n_char per_block_char_counts[block_name][char] = n_char summary["block_alias_counts"] = counter_to_series(block_alias_counts) summary["block_alias_char_counts"] = { k: counter_to_series(v) for k, v in per_block_char_counts.items() } script_counts: Counter = Counter() per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()} for char, n_char in character_counts.items(): script_name = char_to_script[char] script_counts[script_name] += n_char per_script_char_counts[script_name][char] = n_char summary["script_counts"] = counter_to_series(script_counts) summary["script_char_counts"] = { k: counter_to_series(v) for k, v in per_script_char_counts.items() } category_alias_counts: Counter = Counter() per_category_alias_char_counts: dict = { k: Counter() for k in summary["category_alias_values"].values() } for char, n_char in character_counts.items(): category_alias_name = summary["category_alias_values"][char] category_alias_counts[category_alias_name] += n_char per_category_alias_char_counts[category_alias_name][char] += n_char summary["category_alias_counts"] = counter_to_series(category_alias_counts) summary["category_alias_char_counts"] = { k: counter_to_series(v) for k, v in per_category_alias_char_counts.items() } # Unique counts summary["n_category"] = len(summary["category_alias_counts"]) summary["n_scripts"] = len(summary["script_counts"]) summary["n_block_alias"] = len(summary["block_alias_counts"]) if len(summary["category_alias_counts"]) > 0: summary["category_alias_counts"].index = summary[ "category_alias_counts" ].index.str.replace("_", " ") return summary def histogram_compute( config: Settings, finite_values: np.ndarray, n_unique: int, name: str = "histogram", weights: Optional[np.ndarray] = None, ) -> dict: stats = {} bins = config.plot.histogram.bins bins_arg = "auto" if bins == 0 else min(bins, n_unique) stats[name] = np.histogram(finite_values, bins=bins_arg, weights=weights) max_bins = config.plot.histogram.max_bins if bins_arg == "auto" and len(stats[name][1]) > max_bins: stats[name] = np.histogram(finite_values, bins=max_bins, weights=None) return stats def chi_square( values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None ) -> dict: if histogram is None: histogram, _ =
np.histogram(values, bins="auto")
numpy.histogram
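A minimal sketch of the numpy.histogram call completed in the row above, where bins="auto" lets NumPy choose the bin edges before the chi-square test (illustrative only; the sample data is invented):

import numpy as np

values = np.random.default_rng(0).normal(size=1000)
histogram, bin_edges = np.histogram(values, bins="auto")  # counts per bin and the edges
print(histogram.sum())  # 1000: every finite value lands in exactly one bin
print(len(bin_edges) == len(histogram) + 1)  # True: edges bracket the bins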
#!/usr/bin/env python3 import tensorflow as tf physical_devices = tf.config.list_physical_devices('GPU') try: tf.config.experimental.set_memory_growth(physical_devices[0], True) except Exception: # Invalid device or cannot modify virtual devices once initialized. pass import numpy as np import os, time, csv import tqdm import umap import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import datetime import signal import net from matplotlib import rcParams rcParams['font.family'] = 'sans-serif' rcParams['font.sans-serif'] = ['Hiragino Maru Gothic Pro', 'Yu Gothic', 'Meiryo', 'Takao', 'IPAexGothic', 'IPAPGothic', 'Noto Sans CJK JP'] class SimpleEncodeDecoder: def __init__(self): self.save_dir = './result/step1/' self.result_dir = './result/plot/' os.makedirs(self.result_dir, exist_ok=True) checkpoint_dir = self.save_dir self.max_epoch = 300 self.steps_per_epoch = 1000 self.batch_size = 64 lr = tf.keras.optimizers.schedules.ExponentialDecay(1e-3, 1e5, 0.5) self.optimizer = tf.keras.optimizers.Adam(lr) self.encoder = net.FeatureBlock() self.encoder.summary() self.decoder = net.SimpleDecoderBlock() self.decoder.summary() inputs = { 'image': tf.keras.Input(shape=(128,128,3)), } feature_out = self.encoder(inputs) outputs = self.decoder(feature_out) self.model = tf.keras.Model(inputs, outputs, name='SimpleEncodeDecoder') checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model) last = tf.train.latest_checkpoint(checkpoint_dir) checkpoint.restore(last) self.manager = tf.train.CheckpointManager( checkpoint, directory=checkpoint_dir, max_to_keep=2) if last is not None: self.init_epoch = int(os.path.basename(last).split('-')[1]) print('loaded %d epoch'%self.init_epoch) else: self.init_epoch = 0 self.model.summary() def eval(self): self.data = net.FontData() print("Plot: ", self.init_epoch + 1) acc = self.make_plot(self.data.test_data(self.batch_size), (self.init_epoch + 1)) print('acc', acc) @tf.function def eval_substep(self, inputs): input_data = { 'image': inputs['input'], } feature = self.encoder(input_data) outputs = self.decoder(feature) target_id = inputs['index'] target_id1 = inputs['idx1'] target_id2 = inputs['idx2'] pred_id1 = tf.nn.softmax(outputs['id1'], -1) pred_id2 = tf.nn.softmax(outputs['id2'], -1) return { 'feature': feature, 'pred_id1': pred_id1, 'pred_id2': pred_id2, 'target_id': target_id, 'target_id1': target_id1, 'target_id2': target_id2, } def make_plot(self, test_ds, epoch): result = [] labels = [] with open(os.path.join(self.result_dir,'test_result-%d.txt'%epoch),'w') as txt: correct_count = 0 failed_count = 0 with tqdm.tqdm(total=len(self.data.test_keys)) as pbar: for inputs in test_ds: pred = self.eval_substep(inputs) result += [pred['feature']] labels += [pred['target_id']] for i in range(pred['target_id1'].shape[0]): txt.write('---\n') target = pred['target_id'][i].numpy() txt.write('target: id %d = %s\n'%(target, self.data.glyphs[target-1])) predid1 = np.argmax(pred['pred_id1'][i]) predid2 = np.argmax(pred['pred_id2'][i]) predid = predid1 * 100 + predid2 if predid == 0: txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2])) elif predid > self.data.id_count + 1: txt.write('predict: id %d nothing (p=%f)\n'%(predid, pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2])) else: txt.write('predict: id %d = %s (p=%f)\n'%(predid, self.data.glyphs[predid-1], pred['pred_id1'][i][predid1] * pred['pred_id2'][i][predid2])) if target == predid: txt.write('Correct!\n') correct_count
+= 1 else: txt.write('Failed!\n') failed_count += 1 pbar.update(1) acc = correct_count / (correct_count + failed_count) txt.write('==============\n') txt.write('Correct = %d\n'%correct_count) txt.write('Failed = %d\n'%failed_count) txt.write('accuracy = %f\n'%acc) result = np.concatenate(result) labels =
np.concatenate(labels)
numpy.concatenate
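A minimal sketch of the numpy.concatenate calls completed in the row above, which stack the per-batch feature and label arrays gathered in make_plot into single arrays (illustrative only; the batch count and feature width are invented):

import numpy as np

result = [np.zeros((64, 16)) for _ in range(3)]  # three batches of 64 feature vectors
labels = [np.zeros(64) for _ in range(3)]
result = np.concatenate(result)  # joins along axis 0 by default
labels = np.concatenate(labels)
print(result.shape, labels.shape)  # (192, 16) (192,)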
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101),
np.linspace(-2, 2, 101)
numpy.linspace
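The completion above closes a plotting idiom used throughout this script: a vertical dashed line is drawn at each knot by pairing a constant x array with np.linspace for the y-coordinates. A minimal, self-contained sketch of the same pattern, with illustrative knot positions standing in for the EMD output above:

import numpy as np
import matplotlib.pyplot as plt

knots = np.linspace(0, 2 * np.pi, 11)  # illustrative, evenly spaced knots
fig, ax = plt.subplots()
# label only the first knot so the legend shows a single 'Knots' entry
ax.plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for k in knots[1:]:
    ax.plot(k * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
ax.legend(loc='lower left')
plt.show()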
""" Random Variables. This module implements random variables. Random variables are the main in- and outputs of probabilistic numerical methods. """ from typing import Any, Callable, Dict, Generic, Optional, Tuple, TypeVar, Union import numpy as np from probnum import utils as _utils from probnum.type import ( ArrayLikeGetitemArgType, DTypeArgType, FloatArgType, RandomStateArgType, RandomStateType, ShapeArgType, ShapeType, ) try: # functools.cached_property is only available in Python >=3.8 from functools import cached_property except ImportError: from cached_property import cached_property _ValueType = TypeVar("ValueType") class RandomVariable(Generic[_ValueType]): """ Random variables are the main objects used by probabilistic numerical methods. Every probabilistic numerical method takes a random variable encoding the prior distribution as input and outputs a random variable whose distribution encodes the uncertainty arising from finite computation. The generic signature of a probabilistic numerical method is: ``output_rv = probnum_method(input_rv, method_params)`` In practice, most random variables used by methods in ProbNum have Dirac or Gaussian measure. Instances of :class:`RandomVariable` can be added, multiplied, etc. with arrays and linear operators. This may change their ``distribution`` and not necessarily all previously available methods are retained. The internals of :class:`RandomVariable` objects are assumed to be constant over their whole lifecycle. This is due to the caches used to make certain computations more efficient. As a consequence, altering the internal state of a :class:`RandomVariable` (e.g. its mean, cov, sampling function, etc.) will result in undefined behavior. In particular, this should be kept in mind when subclassing :class:`RandomVariable` or any of its descendants. Parameters ---------- shape : Shape of realizations of this random variable. dtype : Data type of realizations of this random variable. If ``object`` will be converted to ``numpy.dtype``. as_value_type : Function which can be used to transform user-supplied arguments, interpreted as realizations of this random variable, to an easy-to-process, normalized format. Will be called internally to transform the argument of functions like ``in_support``, ``cdf`` and ``logcdf``, ``pmf`` and ``logpmf`` (in :class:`DiscreteRandomVariable`), ``pdf`` and ``logpdf`` (in :class:`ContinuousRandomVariable`), and potentially by similar functions in subclasses. For instance, this method is useful if (``log``)``cdf`` and (``log``)``pdf`` both only work on :class:`np.float_` arguments, but we still want the user to be able to pass Python :class:`float`. Then ``as_value_type`` should be set to something like ``lambda x: np.float64(x)``. See Also -------- asrandvar : Transform into a :class:`RandomVariable`. 
Examples -------- """ # pylint: disable=too-many-instance-attributes,too-many-public-methods def __init__( self, shape: ShapeArgType, dtype: DTypeArgType, random_state: RandomStateArgType = None, parameters: Optional[Dict[str, Any]] = None, sample: Optional[Callable[[ShapeType], _ValueType]] = None, in_support: Optional[Callable[[_ValueType], bool]] = None, cdf: Optional[Callable[[_ValueType], np.float_]] = None, logcdf: Optional[Callable[[_ValueType], np.float_]] = None, quantile: Optional[Callable[[FloatArgType], _ValueType]] = None, mode: Optional[Callable[[], _ValueType]] = None, median: Optional[Callable[[], _ValueType]] = None, mean: Optional[Callable[[], _ValueType]] = None, cov: Optional[Callable[[], _ValueType]] = None, var: Optional[Callable[[], _ValueType]] = None, std: Optional[Callable[[], _ValueType]] = None, entropy: Optional[Callable[[], np.float_]] = None, as_value_type: Optional[Callable[[Any], _ValueType]] = None, ): # pylint: disable=too-many-arguments,too-many-locals """Create a new random variable.""" self.__shape = _utils.as_shape(shape) # Data Types self.__dtype = np.dtype(dtype) self.__median_dtype = RandomVariable.infer_median_dtype(self.__dtype) self.__moment_dtype = RandomVariable.infer_moment_dtype(self.__dtype) self._random_state = _utils.as_random_state(random_state) # Probability distribution of the random variable self.__parameters = parameters.copy() if parameters is not None else {} self.__sample = sample self.__in_support = in_support self.__cdf = cdf self.__logcdf = logcdf self.__quantile = quantile # Properties of the random variable self.__mode = mode self.__median = median self.__mean = mean self.__cov = cov self.__var = var self.__std = std self.__entropy = entropy # Utilities self.__as_value_type = as_value_type def __repr__(self) -> str: return f"<{self.shape} {self.__class__.__name__} with dtype={self.dtype}>" @property def shape(self) -> ShapeType: """Shape of realizations of the random variable.""" return self.__shape @cached_property def ndim(self) -> int: return len(self.__shape) @cached_property def size(self) -> int: return int(
np.prod(self.__shape)
numpy.prod
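The completion finishes the `size` property: the number of scalar elements in one realization is the product of the shape entries. A standalone illustration of why the `int(...)` cast matters — `np.prod` of an empty tuple returns the float `1.0`, which is also the right element count for scalars:

import numpy as np

for shape in [(), (3,), (2, 4)]:
    size = int(np.prod(shape))  # np.prod(()) == 1.0, so the cast normalises to int
    print(shape, size)  # -> () 1, (3,) 3, (2, 4) 8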
from abc import ABCMeta, abstractmethod
import os

from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats

__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"

import re
import numpy as np
import ast

from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader


class FeatureExtractor(Executor):
    """
    FeatureExtractor takes in a list of assets, runs feature extraction on them,
    and returns a list of corresponding results. A FeatureExtractor must specify
    a unique type and version combination (by the TYPE and VERSION attributes),
    so that the Result generated by it can be identified.

    A derived class of FeatureExtractor must:
    1) Override TYPE and VERSION
    2) Override _generate_result(self, asset), which calls a command-line
    executable and generates feature scores in a log file.
    3) Override _get_feature_scores(self, asset), which reads the feature
    scores from the log file, and returns the scores in a dictionary format.
    For an example, follow VmafFeatureExtractor.
    """

    __metaclass__ = ABCMeta

    @property
    @abstractmethod
    def ATOM_FEATURES(self):
        raise NotImplementedError

    def _read_result(self, asset):
        result = {}
        result.update(self._get_feature_scores(asset))
        executor_id = self.executor_id
        return Result(asset, executor_id, result)

    @classmethod
    def get_scores_key(cls, atom_feature):
        return "{type}_{atom_feature}_scores".format(
            type=cls.TYPE, atom_feature=atom_feature)

    @classmethod
    def get_score_key(cls, atom_feature):
        return "{type}_{atom_feature}_score".format(
            type=cls.TYPE, atom_feature=atom_feature)

    def _get_feature_scores(self, asset):
        # routine to read the feature scores from the log file, and return
        # the scores in a dictionary format.
        log_file_path = self._get_log_file_path(asset)

        atom_feature_scores_dict = {}
        atom_feature_idx_dict = {}
        for atom_feature in self.ATOM_FEATURES:
            atom_feature_scores_dict[atom_feature] = []
            atom_feature_idx_dict[atom_feature] = 0

        with open(log_file_path, 'rt') as log_file:
            for line in log_file.readlines():
                for atom_feature in self.ATOM_FEATURES:
                    re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
                    mo = re.match(re_template, line)
                    if mo:
                        cur_idx = int(mo.group(1))
                        assert cur_idx == atom_feature_idx_dict[atom_feature]
                        # parse value, allowing NaN and inf
                        val = float(mo.group(2))
                        if np.isnan(val) or np.isinf(val):
                            val = None
                        atom_feature_scores_dict[atom_feature].append(val)
                        atom_feature_idx_dict[atom_feature] += 1
                        continue

        len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
        assert len_score != 0
        for atom_feature in self.ATOM_FEATURES[1:]:
            assert len_score == len(atom_feature_scores_dict[atom_feature]), \
                "Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (
np.array(result.result_dict[vif_num_scale0_scores_key])
numpy.array
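The completion begins the per-scale VIF ratio: numerator and denominator score lists are wrapped in `np.array` so the division is elementwise, then converted back to a list for the result dict. A reduced sketch with made-up per-frame scores:

import numpy as np

vif_num_scale0 = [0.91, 0.84, 0.95]  # hypothetical per-frame numerators
vif_den_scale0 = [1.00, 1.05, 1.00]  # hypothetical per-frame denominators
vif_scale0 = list(np.array(vif_num_scale0) / np.array(vif_den_scale0))
print(vif_scale0)  # one ratio per frame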
# # Copyright (c) 2021 The GPflux Contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import numpy as np import pytest import tensorflow as tf import tensorflow_probability as tfp from gpflow.kullback_leiblers import gauss_kl from gpflux.encoders import DirectlyParameterizedNormalDiag from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer tf.keras.backend.set_floatx("float64") ############ # Utilities ############ def _zero_one_normal_prior(w_dim): """ N(0, I) prior """ return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim)) def get_distributions_with_w_dim(): distributions = [] for d in [1, 5]: mean = np.zeros(d) scale_tri_l = np.eye(d) mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l) std = np.ones(d) mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std) distributions.append((mvn, d)) distributions.append((mvn_diag, d)) return distributions ############ # Tests ############ @pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim()) def test_local_kls(distribution, w_dim): lv = LatentVariableLayer(encoder=None, prior=distribution) # test kl is 0 when posteriors == priors posterior = distribution assert lv._local_kls(posterior) == 0 # test kl > 0 when posteriors != priors batch_size = 10 params = distribution.parameters posterior_params = { k: [v + 0.5 for _ in range(batch_size)] for k, v in params.items() if isinstance(v, np.ndarray) } posterior = lv.distribution_class(**posterior_params) local_kls = lv._local_kls(posterior) assert np.all(local_kls > 0) assert local_kls.shape == (batch_size,) @pytest.mark.parametrize("w_dim", [1, 5]) def test_local_kl_gpflow_consistency(w_dim): num_data = 400 means = np.random.randn(num_data, w_dim) encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means) lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim)) posteriors = lv._inference_posteriors( [np.random.randn(num_data, 3), np.random.randn(num_data, 2)] ) q_mu = posteriors.parameters["loc"] q_sqrt = posteriors.parameters["scale_diag"] gpflow_local_kls = gauss_kl(q_mu, q_sqrt) tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors)) np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10) class ArrayMatcher: def __init__(self, expected): self.expected = expected def __eq__(self, actual): return np.allclose(actual, self.expected, equal_nan=True) @pytest.mark.parametrize("w_dim", [1, 5]) def test_latent_variable_layer_losses(mocker, w_dim): num_data, x_dim, y_dim = 43, 3, 1 prior_shape = (w_dim,) posteriors_shape = (num_data, w_dim) prior = tfp.distributions.MultivariateNormalDiag( loc=np.random.randn(*prior_shape), scale_diag=np.random.randn(*prior_shape) ** 2, ) posteriors = tfp.distributions.MultivariateNormalDiag( loc=np.random.randn(*posteriors_shape), scale_diag=np.random.randn(*posteriors_shape) ** 2, ) encoder = mocker.Mock(return_value=(posteriors.loc, posteriors.scale.diag)) lv = LatentVariableLayer(encoder=encoder, 
prior=prior) inputs = np.full((num_data, x_dim), np.nan) targets = np.full((num_data, y_dim), np.nan) observations = [inputs, targets] encoder_inputs = np.concatenate(observations, axis=-1) _ = lv(inputs) encoder.assert_not_called() assert lv.losses == [0.0] _ = lv(inputs, observations=observations, training=True) # assert_called_once_with uses == for comparison which fails on arrays encoder.assert_called_once_with(ArrayMatcher(encoder_inputs), training=True) expected_loss = [tf.reduce_mean(posteriors.kl_divergence(prior))] np.testing.assert_equal(lv.losses, expected_loss) # also checks shapes match @pytest.mark.parametrize("w_dim", [1, 5]) @pytest.mark.parametrize("seed2", [None, 42]) def test_latent_variable_layer_samples(mocker, test_data, w_dim, seed2): seed = 123 inputs, targets = test_data num_data, x_dim = inputs.shape prior_shape = (w_dim,) posteriors_shape = (num_data, w_dim) prior = tfp.distributions.MultivariateNormalDiag( loc=np.random.randn(*prior_shape), scale_diag=np.random.randn(*prior_shape) ** 2, ) posteriors = tfp.distributions.MultivariateNormalDiag( loc=
np.random.randn(*posteriors_shape)
numpy.random.randn
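The completion supplies the batch of posterior means. Reduced to plain numpy (shapes assumed from the test above), `np.random.randn` draws the locations, and squaring a second draw guarantees non-negative scale parameters:

import numpy as np

num_data, w_dim = 43, 5  # batch size and latent dimension, as in the test
loc = np.random.randn(num_data, w_dim)              # batch of means
scale_diag = np.random.randn(num_data, w_dim) ** 2  # squared draws, so >= 0
print(loc.shape, scale_diag.shape, bool((scale_diag >= 0).all()))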
'''
<NAME> set up: 2020-1-9
integrate img and label into one file -- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2

# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/')  # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils


def getDatasets(dir):
    return os.listdir(dir)


class perturbed(utils.BasePerturbed):
    def __init__(self, path, bg_path, save_path, save_suffix):
        self.path = path
        self.bg_path = bg_path
        self.save_path = save_path
        self.save_suffix = save_suffix

    def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points=16, relativeShift_position='relativeShift_v2'):
        origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)

        save_img_shape = [512*2, 480*2]  # 320
        # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
        reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
        # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
        # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
        base_img_shrink = save_img_shape[0] - reduce_value

        # enlarge_img_shrink = [1024, 768]
        # enlarge_img_shrink = [896, 672]  # 420
        enlarge_img_shrink = [512*4, 480*4]  # 420
        # enlarge_img_shrink = [896*2, 768*2]  # 420
        # enlarge_img_shrink = [896, 768]  # 420
        # enlarge_img_shrink = [768, 576]  # 420
        # enlarge_img_shrink = [640, 480]  # 420

        ''''''
        im_lr = origin_img.shape[0]
        im_ud = origin_img.shape[1]
        reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
        # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
        if im_lr > im_ud:
            im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
            im_lr = save_img_shape[0] - reduce_value
        else:
            base_img_shrink = save_img_shape[1] - reduce_value
            im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
            im_ud = base_img_shrink
        if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
            repeat_time = min(repeat_time, 8)

        edge_padding = 3
        im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding)  # im_lr % (fiducial_points-1) - 1
        im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding)  # im_ud % (fiducial_points-1) - 1
        im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
        im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
        # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding)  # im_lr % (fiducial_points-1) - 1
        # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding)  # im_ud % (fiducial_points-1) - 1
        # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
        # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
        im_x, im_y = np.meshgrid(im_hight, im_wide)

        segment_x = (im_lr) // (fiducial_points-1)
        segment_y = (im_ud) // (fiducial_points-1)
        # plt.plot(im_x, im_y,
        #          color='limegreen',
        #          marker='.',
        #          linestyle='')
        # plt.grid(True)
        # plt.show()
        self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
        perturbed_bg_ = getDatasets(self.bg_path)
        perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
        perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)

        mesh_shape = self.origin_img.shape[:2]

        self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)  # np.zeros_like(perturbed_bg_img)
        # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)  # np.zeros_like(perturbed_bg_img)
        self.new_shape = self.synthesis_perturbed_img.shape[:2]
        # was cv2.INPAINT_TELEA, an inpainting flag, passed positionally where it is not a valid resize argument
        perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), interpolation=cv2.INTER_LINEAR)

        origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
        pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
        self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
        # self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
        # fiducial_points_grid = origin_pixel_position[im_x, im_y]

        self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
        x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
        origin_pixel_position += [x_min, y_min]

        x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
        x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
        y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
        x_min += x_shift
        x_max += x_shift
        y_min += y_shift
        y_max += y_shift

        '''im_x,y'''
        im_x += x_min
        im_y += y_min

        self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
        self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position

        synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
        synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()

        foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
        foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
        foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label

        # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
        # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
        '''*****************************************************************'''
        is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
        # if not is_normalizationFun_mixture:
        normalizationFun_0_1 = False
        # normalizationFun_0_1 = self.is_perform(0.5, 0.5)
        if fold_curve == 'fold':
            fold_curve_random = True
            # is_normalizationFun_mixture = False
            normalizationFun_0_1 = self.is_perform(0.2, 0.8)
            if is_normalizationFun_mixture:
                alpha_perturbed = random.randint(80, 120) / 100
            else:
                if normalizationFun_0_1 and repeat_time < 8:
                    alpha_perturbed = random.randint(50, 70) / 100
                else:
                    alpha_perturbed = random.randint(70, 130) / 100
        else:
            fold_curve_random = self.is_perform(0.1, 0.9)  # False  # self.is_perform(0.01, 0.99)
            alpha_perturbed = random.randint(80, 160) / 100
            # is_normalizationFun_mixture = False  # self.is_perform(0.01, 0.99)

        synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
        # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
        synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)

        alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 =
np.linalg.norm(pts2[0]-pts2[2])
numpy.linalg.norm
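The completion computes one edge of the randomly perturbed quadrilateral; the script compares all four edge lengths against thresholds before accepting a perspective warp. A reduced sketch with made-up corner points, keeping the pts2 row layout used above:

import numpy as np

pts2 = np.float32([[0, 0], [100, 4], [-3, 200], [98, 205]])  # illustrative corners
e_1 = np.linalg.norm(pts2[0] - pts2[1])  # edge between the first two corners
e_2 = np.linalg.norm(pts2[0] - pts2[2])  # edge between the first and third
e_3 = np.linalg.norm(pts2[1] - pts2[3])  # edge between the second and fourth
e_4 = np.linalg.norm(pts2[2] - pts2[3])  # edge between the last two corners
print(e_1, e_2, e_3, e_4)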
""" Collection of tests asserting things that should be true for any index subclass. Makes use of the `indices` fixture defined in pandas/tests/indexes/conftest.py. """ import re import numpy as np import pytest from pandas._libs.tslibs import iNaT from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd from pandas import ( CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex, RangeIndex, TimedeltaIndex, ) import pandas._testing as tm class TestCommon: def test_droplevel(self, index): # GH 21115 if isinstance(index, MultiIndex): # Tested separately in test_multi.py return assert index.droplevel([]).equals(index) for level in index.name, [index.name]: if isinstance(index.name, tuple) and level is index.name: # GH 21121 : droplevel with tuple name continue with pytest.raises(ValueError): index.droplevel(level) for level in "wrong", ["wrong"]: with pytest.raises( KeyError, match=r"'Requested level \(wrong\) does not match index name \(None\)'", ): index.droplevel(level) def test_constructor_non_hashable_name(self, index): # GH 20527 if isinstance(index, MultiIndex): pytest.skip("multiindex handled in test_multi.py") message = "Index.name must be a hashable type" renamed = [["1"]] # With .rename() with pytest.raises(TypeError, match=message): index.rename(name=renamed) # With .set_names() with pytest.raises(TypeError, match=message): index.set_names(names=renamed) def test_constructor_unwraps_index(self, index): if isinstance(index, pd.MultiIndex): raise pytest.skip("MultiIndex has no ._data") a = index b = type(a)(a) tm.assert_equal(a._data, b._data) @pytest.mark.parametrize("itm", [101, "no_int"]) # FutureWarning from non-tuple sequence of nd indexing @pytest.mark.filterwarnings("ignore::FutureWarning") def test_getitem_error(self, index, itm): with pytest.raises(IndexError): index[itm] @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_union(self, index, fname, sname, expected_name): # GH 9943 9862 # Test unions with various name combinations # Do not test MultiIndex or repeats if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.union(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test copy.union(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) union = first.union(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(union, expected) # Test empty.union(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) union = first.union(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_union_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.union(subset) - need 
sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) union = first.union(second).sort_values() expected = index.set_names(expected_name).sort_values() tm.assert_index_equal(union, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_corner_intersect(self, index, fname, sname, expected_name): # GH35847 # Test intersections with various name combinations if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # Test copy.intersection(copy) first = index.copy().set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.copy().set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test copy.intersection(empty) first = index.copy().set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(copy) first = index.drop(index).set_names(fname) second = index.copy().set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) # Test empty.intersection(empty) first = index.drop(index).set_names(fname) second = index.drop(index).set_names(sname) intersect = first.intersection(second) expected = index.drop(index).set_names(expected_name) tm.assert_index_equal(intersect, expected) @pytest.mark.parametrize( "fname, sname, expected_name", [ ("A", "A", "A"), ("A", "B", None), ("A", None, None), (None, "B", None), (None, None, None), ], ) def test_intersect_unequal(self, index, fname, sname, expected_name): if isinstance(index, MultiIndex) or not index.is_unique: pytest.skip("Not for MultiIndex or repeated indices") # test copy.intersection(subset) - need sort for unicode and string first = index.copy().set_names(fname) second = index[1:].set_names(sname) intersect = first.intersection(second).sort_values() expected = index[1:].set_names(expected_name).sort_values() tm.assert_index_equal(intersect, expected) def test_to_flat_index(self, index): # 22866 if isinstance(index, MultiIndex): pytest.skip("Separate expectation for MultiIndex") result = index.to_flat_index() tm.assert_index_equal(result, index) def test_set_name_methods(self, index): new_name = "This is the new name for this index" # don't tests a MultiIndex here (as its tested separated) if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") original_name = index.name new_ind = index.set_names([new_name]) assert new_ind.name == new_name assert index.name == original_name res = index.rename(new_name, inplace=True) # should return None assert res is None assert index.name == new_name assert index.names == [new_name] # FIXME: dont leave commented-out # with pytest.raises(TypeError, match="list-like"): # # should still fail even if it would be the right length # ind.set_names("a") with pytest.raises(ValueError, match="Level must be None"): index.set_names("a", level=0) # rename in place just leaves tuples and other containers alone name = ("A", "B") index.rename(name, inplace=True) assert index.name == name assert index.names == [name] def test_copy_and_deepcopy(self, index): from copy import copy, deepcopy if isinstance(index, MultiIndex): pytest.skip("Skip check for MultiIndex") for func in 
(copy, deepcopy): idx_copy = func(index) assert idx_copy is not index assert idx_copy.equals(index) new_copy = index.copy(deep=True, name="banana") assert new_copy.name == "banana" def test_unique(self, index): # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) if isinstance(index, (MultiIndex, CategoricalIndex)): pytest.skip("Skip check for MultiIndex/CategoricalIndex") # GH 17896 expected = index.drop_duplicates() for level in 0, index.name, None: result = index.unique(level=level) tm.assert_index_equal(result, expected) msg = "Too many levels: Index has only 1 level, not 4" with pytest.raises(IndexError, match=msg): index.unique(level=3) msg = ( fr"Requested level \(wrong\) does not match index name " fr"\({re.escape(index.name.__repr__())}\)" ) with pytest.raises(KeyError, match=msg): index.unique(level="wrong") def test_get_unique_index(self, index): # MultiIndex tested separately if not len(index) or isinstance(index, MultiIndex): pytest.skip("Skip check for empty Index and MultiIndex") idx = index[[0] * 5] idx_unique = index[[0]] # We test against `idx_unique`, so first we make sure it's unique # and doesn't contain nans. assert idx_unique.is_unique is True try: assert idx_unique.hasnans is False except NotImplementedError: pass for dropna in [False, True]: result = idx._get_unique_index(dropna=dropna) tm.assert_index_equal(result, idx_unique) # nans: if not index._can_hold_na: pytest.skip("Skip na-check if index cannot hold na") if is_period_dtype(index.dtype): vals = index[[0] * 5]._data vals[0] = pd.NaT elif needs_i8_conversion(index.dtype): vals = index.asi8[[0] * 5] vals[0] = iNaT else: vals = index.values[[0] * 5] vals[0] = np.nan vals_unique = vals[:2] if index.dtype.kind in ["m", "M"]: # i.e. 
needs_i8_conversion but not period_dtype, as above vals = type(index._data)._simple_new(vals, dtype=index.dtype) vals_unique = type(index._data)._simple_new(vals_unique, dtype=index.dtype) idx_nan = index._shallow_copy(vals) idx_unique_nan = index._shallow_copy(vals_unique) assert idx_unique_nan.is_unique is True assert idx_nan.dtype == index.dtype assert idx_unique_nan.dtype == index.dtype for dropna, expected in zip([False, True], [idx_unique_nan, idx_unique]): for i in [idx_nan, idx_unique_nan]: result = i._get_unique_index(dropna=dropna) tm.assert_index_equal(result, expected) def test_mutability(self, index): if not len(index): pytest.skip("Skip check for empty Index") msg = "Index does not support mutable operations" with pytest.raises(TypeError, match=msg): index[0] = index[0] def test_view(self, index): assert index.view().name == index.name def test_searchsorted_monotonic(self, index): # GH17271 # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex if isinstance(index, (MultiIndex, pd.IntervalIndex)): pytest.skip("Skip check for MultiIndex/IntervalIndex") # nothing to test if the index is empty if index.empty: pytest.skip("Skip check for empty Index") value = index[0] # determine the expected results (handle dupes for 'right') expected_left, expected_right = 0, (index == value).argmin() if expected_right == 0: # all values are the same, expected_right should be length expected_right = len(index) # test _searchsorted_monotonic in all cases # test searchsorted only for increasing if index.is_monotonic_increasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right ss_left = index.searchsorted(value, side="left") assert expected_left == ss_left ss_right = index.searchsorted(value, side="right") assert expected_right == ss_right elif index.is_monotonic_decreasing: ssm_left = index._searchsorted_monotonic(value, side="left") assert expected_left == ssm_left ssm_right = index._searchsorted_monotonic(value, side="right") assert expected_right == ssm_right else: # non-monotonic should raise. 
with pytest.raises(ValueError): index._searchsorted_monotonic(value, side="left") def test_pickle(self, index): original_name, index.name = index.name, "foo" unpickled = tm.round_trip_pickle(index) assert index.equals(unpickled) index.name = original_name def test_drop_duplicates(self, index, keep): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") if isinstance(index, RangeIndex): pytest.skip( "RangeIndex is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) if len(index) == 0: pytest.skip( "empty index is tested in test_drop_duplicates_no_duplicates " "as it cannot hold duplicates" ) # make unique index holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # make duplicated index n = len(unique_idx) duplicated_selection = np.random.choice(n, int(n * 1.5)) idx = holder(unique_idx.values[duplicated_selection]) # Series.duplicated is tested separately expected_duplicated = ( pd.Series(duplicated_selection).duplicated(keep=keep).values ) tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated) # Series.drop_duplicates is tested separately expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep)) tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped) def test_drop_duplicates_no_duplicates(self, index): if isinstance(index, MultiIndex): pytest.skip("MultiIndex is tested separately") # make unique index if isinstance(index, RangeIndex): # RangeIndex cannot have duplicates unique_idx = index else: holder = type(index) unique_values = list(set(index)) unique_idx = holder(unique_values) # check on unique index expected_duplicated = np.array([False] * len(unique_idx), dtype="bool") tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated) result_dropped = unique_idx.drop_duplicates() tm.assert_index_equal(result_dropped, unique_idx) # validate shallow copy assert result_dropped is not unique_idx def test_drop_duplicates_inplace(self, index): msg = r"drop_duplicates\(\) got an unexpected keyword argument" with pytest.raises(TypeError, match=msg): index.drop_duplicates(inplace=True) def test_has_duplicates(self, index): holder = type(index) if not len(index) or isinstance(index, (MultiIndex, RangeIndex)): # MultiIndex tested separately in: # tests/indexes/multi/test_unique_and_duplicates. # RangeIndex is unique by definition. 
pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex") idx = holder([index[0]] * 5) assert idx.is_unique is False assert idx.has_duplicates is True @pytest.mark.parametrize( "dtype", ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"], ) def test_astype_preserves_name(self, index, dtype): # https://github.com/pandas-dev/pandas/issues/32013 if isinstance(index, MultiIndex): index.names = ["idx" + str(i) for i in range(index.nlevels)] else: index.name = "idx" try: # Some of these conversions cannot succeed so we use a try / except result = index.astype(dtype) except (ValueError, TypeError, NotImplementedError, SystemError): return if isinstance(index, MultiIndex): assert result.names == index.names else: assert result.name == index.name def test_ravel_deprecation(self, index): # GH#19956 ravel returning ndarray is deprecated with tm.assert_produces_warning(FutureWarning): index.ravel() @pytest.mark.parametrize("na_position", [None, "middle"]) def test_sort_values_invalid_na_position(index_with_missing, na_position): if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") if na_position not in ["first", "last"]: with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"): index_with_missing.sort_values(na_position=na_position) @pytest.mark.parametrize("na_position", ["first", "last"]) def test_sort_values_with_missing(index_with_missing, na_position): # GH 35584. Test that sort_values works with missing values, # sort non-missing and place missing according to na_position if isinstance(index_with_missing, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): # datetime-like indices will get na_position kwarg as part of # synchronizing duplicate-sorting behavior, because we currently expect # them, other indices, and Series to sort differently (xref 35922) pytest.xfail("sort_values does not support na_position kwarg") elif isinstance(index_with_missing, (CategoricalIndex, MultiIndex)): pytest.xfail("missing value sorting order not defined for index type") missing_count = np.sum(index_with_missing.isna()) not_na_vals = index_with_missing[index_with_missing.notna()].values sorted_values = np.sort(not_na_vals) if na_position == "first": sorted_values = np.concatenate([[None] * missing_count, sorted_values]) else: sorted_values =
np.concatenate([sorted_values, [None] * missing_count])
numpy.concatenate
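The completion handles na_position='last': the missing values are appended after the sorted block, mirroring the 'first' branch that prepends them. The same logic in isolation, with illustrative values (`None` stands in for the index's missing marker):

import numpy as np

not_na_vals = np.array([3.0, 1.0, 2.0])
missing_count = 2
sorted_values = np.sort(not_na_vals)
na_first = np.concatenate([[None] * missing_count, sorted_values])  # na_position='first'
na_last = np.concatenate([sorted_values, [None] * missing_count])   # na_position='last'
print(na_first, na_last)  # object arrays: missing block before / after the sorted values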
""" Greedy Word Swap with Word Importance Ranking =================================================== When WIR method is set to ``unk``, this is a reimplementation of the search method from the paper: Is BERT Really Robust? A Strong Baseline for Natural Language Attack on Text Classification and Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and https://github.com/jind11/TextFooler. """ import numpy as np import torch from torch.nn.functional import softmax from textattack.goal_function_results import GoalFunctionResultStatus from textattack.search_methods import SearchMethod from textattack.shared.validators import ( transformation_consists_of_word_swaps_and_deletions, ) class GreedyWordSwapWIR(SearchMethod): """An attack that greedily chooses from a list of possible perturbations in order of index, after ranking indices by importance. Args: wir_method: method for ranking most important words """ def __init__(self, wir_method="unk"): self.wir_method = wir_method def _get_index_order(self, initial_text): """Returns word indices of ``initial_text`` in descending order of importance.""" len_text = len(initial_text.words) if self.wir_method == "unk": leave_one_texts = [ initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text) ] leave_one_results, search_over = self.get_goal_results(leave_one_texts) index_scores = np.array([result.score for result in leave_one_results]) elif self.wir_method == "weighted-saliency": # first, compute word saliency leave_one_texts = [ initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text) ] leave_one_results, search_over = self.get_goal_results(leave_one_texts) saliency_scores = np.array([result.score for result in leave_one_results]) softmax_saliency_scores = softmax( torch.Tensor(saliency_scores), dim=0 ).numpy() # compute the largest change in score we can find by swapping each word delta_ps = [] for idx in range(len_text): transformed_text_candidates = self.get_transformations( initial_text, original_text=initial_text, indices_to_modify=[idx], ) if not transformed_text_candidates: # no valid synonym substitutions for this word delta_ps.append(0.0) continue swap_results, _ = self.get_goal_results(transformed_text_candidates) score_change = [result.score for result in swap_results] max_score_change = np.max(score_change) delta_ps.append(max_score_change) index_scores = softmax_saliency_scores *
np.array(delta_ps)
numpy.array
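The completion closes the weighted-saliency ranking: each word's softmaxed saliency weights the largest score change obtainable by swapping that word, and the products become the importance scores. A standalone sketch with fabricated scores:

import numpy as np
import torch
from torch.nn.functional import softmax

saliency_scores = np.array([0.10, 0.50, 0.20])  # hypothetical leave-one-out scores
delta_ps = [0.05, 0.30, 0.10]                   # hypothetical best swap gains per index
softmax_saliency_scores = softmax(torch.Tensor(saliency_scores), dim=0).numpy()
index_scores = softmax_saliency_scores * np.array(delta_ps)  # elementwise weighting
print(np.argsort(-index_scores))  # word indices in descending importance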
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Windsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Windsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1],
            c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
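# Illustrative addition (not part of AdvEMDpy): a generic version of the
# symmetric / anti-symmetric edge extensions drawn in the figure above (the
# plotted construction differs in detail). The tail of the signal is mirrored
# about the final time point; the anti-symmetric variant is further reflected
# about the endpoint value so the extension leaves the signal continuously.
def reflect_edge(t, y, n_points=100, anti=False):
    t_reflect = 2 * t[-1] - t[-1 - n_points:][::-1]  # mirror times about t[-1]
    y_reflect = y[-1 - n_points:][::-1]              # even (symmetric) reflection
    if anti:
        y_reflect = 2 * y[-1] - y_reflect            # odd (anti-symmetric) reflection
    return t_reflect, y_reflect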
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
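# Illustrative usage sketch: the minimal AdvEMDpy call pattern exercised
# throughout this script, shown once in isolation (only arguments already
# used above are assumed to exist).
demo_time = np.linspace(0, 5 * np.pi, 1001)
demo_series = np.cos(2 * demo_time) + np.cos(4 * demo_time)
demo_emd = EMD(time=demo_time, time_series=demo_series)
demo_imfs = demo_emd.empirical_mode_decomposition(knots=np.linspace(0, 5 * np.pi, 51),
                                                  edge_effect='symmetric_anchor',
                                                  verbose=False)[0]
print(demo_imfs.shape)  # demo_imfs[1, :] is IMF 1, matching the indexing above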
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of a group to which ith column assigned. The procedure was helpful only if n_groups is significantly less than n. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True. 
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only few non-zero elements in each row, then it's possible to estimate its several columns by a single function evaluation [3]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that a corresponding element of the Jacobian identically equals to zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note, that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns an `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then a ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure, for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing when m=1 Jacobian is returned with a shape (n,), on the other hand when n=1 Jacobian is returned with a shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. b) In all cases np.atleast_2d can be called to get 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. .. 
[2] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] <NAME>, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h /
norm(p)
            x1 = x0 - (dx/2)*p
            x2 = x0 + (dx/2)*p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx
    elif method == 'cs':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p*1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx
    else:
        raise RuntimeError("Never be here.")

    return LinearOperator((m, n), matvec)
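# Illustrative addition (not part of the original module): exercise the
# `as_linear_operator=True` path completed above. The returned LinearOperator
# exposes the Jacobian only through products J.dot(p), so it needs none of
# the dense or sparse differencing helpers.
if __name__ == '__main__':
    def _toy_fun(x):
        return np.array([x[0] ** 2 + x[1], np.sin(x[0]) * x[1]])

    _J = approx_derivative(_toy_fun, np.array([1.0, 2.0]), method='3-point',
                           as_linear_operator=True)
    # Probe the operator with basis vectors to recover the two columns;
    # the exact Jacobian is [[2*x0, 1], [x1*cos(x0), sin(x0)]] at x0=1, x1=2.
    print(_J.dot(np.array([1.0, 0.0])))
    print(_J.dot(np.array([0.0, 1.0])))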
from __future__ import print_function import numpy as np import matplotlib.pyplot as plt class TwoLayerNet(object): """ A two-layer fully-connected neural network. The net has an input dimension of N, a hidden layer dimension of H, and performs classification over C classes. We train the network with a softmax loss function and L2 regularization on the weight matrices. The network uses a ReLU nonlinearity after the first fully connected layer. In other words, the network has the following architecture: input - fully connected layer - ReLU - fully connected layer - softmax The outputs of the second fully-connected layer are the scores for each class. """ def __init__(self, input_size, hidden_size, output_size, std=1e-4): """ Initialize the model. Weights are initialized to small random values and biases are initialized to zero. Weights and biases are stored in the variable self.params, which is a dictionary with the following keys W1: First layer weights; has shape (D, H) b1: First layer biases; has shape (H,) W2: Second layer weights; has shape (H, C) b2: Second layer biases; has shape (C,) Inputs: - input_size: The dimension D of the input data. - hidden_size: The number of neurons H in the hidden layer. - output_size: The number of classes C. """ self.params = {} self.params['W1'] = std * np.random.randn(input_size, hidden_size) self.params['b1'] = np.zeros(hidden_size) self.params['W2'] = std * np.random.randn(hidden_size, output_size) self.params['b2'] = np.zeros(output_size) def loss(self, X, y=None, reg=0.0): """ Compute the loss and gradients for a two layer fully connected neural network. Inputs: - X: Input data of shape (N, D). Each X[i] is a training sample. - y: Vector of training labels. y[i] is the label for X[i], and each y[i] is an integer in the range 0 <= y[i] < C. This parameter is optional; if it is not passed then we only return scores, and if it is passed then we instead return the loss and gradients. - reg: Regularization strength. Returns: If y is None, return a matrix scores of shape (N, C) where scores[i, c] is the score for class c on input X[i]. If y is not None, instead return a tuple of: - loss: Loss (data loss and regularization loss) for this batch of training samples. - grads: Dictionary mapping parameter names to gradients of those parameters with respect to the loss function; has the same keys as self.params. """ # Unpack variables from the params dictionary W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] N, D = X.shape # Compute the forward pass scores = None ####################################################################### # TODO: Perform the forward pass, computing the class scores for the # # input. Store the result in the scores variable, which should be an # # array of shape (N, C). # ####################################################################### scores1 = X.dot(W1) + b1 # FC1 X2 = np.maximum(0, scores1) # ReLU FC1 scores = X2.dot(W2) + b2 # FC2 ####################################################################### # END OF YOUR CODE # ####################################################################### # If the targets are not given then jump out, we're done if y is None: return scores scores -= np.max(scores) # Fix Number instability scores_exp = np.exp(scores) probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True) # Compute the loss loss = None ####################################################################### # TODO: Finish the forward pass, and compute the loss. 
This should # # include both the data loss and L2 regularization for W1 and W2. # # Store the result in the variable loss, which should be a scalar. Use# # the Softmax classifier loss. # ####################################################################### correct_probs = -np.log(probs[np.arange(N), y]) # L_i = -log(e^correct_score/sum(e^scores))) = -log(correct_probs) loss = np.sum(correct_probs) loss /= N # L2 regularization WRT W1 and W2 loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2)) ####################################################################### # END OF YOUR CODE # ####################################################################### # Backward pass: compute gradients grads = {} ############################################################################# # TODO: Compute the backward pass, computing the derivatives of the weights # # and biases. Store the results in the grads dictionary. For example, # # grads['W1'] should store the gradient on W1, and be a matrix of same size # ############################################################################# # gradient of loss_i WRT scores_k # dL_i/ds_k = probs_k-1(y_i == k) # this means the gradient is the score for "other" classes and score-1 # for the target class d_scores = probs.copy() d_scores[np.arange(N), y] -= 1 d_scores /= N # W2 were multiplied with X2, by chain rule and multiplication # derivative, WRT W2 we need to multiply downstream derivative by X2 d_W2 = X2.T.dot(d_scores) # b2 was added, so it's d is 1 but we must multiply it with chain rule # (downstream), in this case d_scores d_b2 = np.sum(d_scores, axis=0) # W1 is upstream of X2, so we continue this way d_X2 = d_scores.dot(W2.T) # ReLU derivative is 1 for > 0, else 0 d_scores1 = d_X2 * (scores1 > 0) d_W1 = X.T.dot(d_scores1) # b1 gradient d_b1 = d_scores1.sum(axis=0) # regularization gradient (reg*W2^2) d_W2 += reg * 2 * W2 d_W1 += reg * 2 * W1 grads['W1'] = d_W1 grads['b1'] = d_b1 grads['W2'] = d_W2 grads['b2'] = d_b2 ####################################################################### # END OF YOUR CODE # ####################################################################### return loss, grads def train(self, X, y, X_val, y_val, learning_rate=1e-3, learning_rate_decay=0.95, reg=5e-6, num_iters=100, batch_size=200, verbose=False): """ Train this neural network using stochastic gradient descent. Inputs: - X: A numpy array of shape (N, D) giving training data. - y: A numpy array f shape (N,) giving training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - X_val: A numpy array of shape (N_val, D) giving validation data. - y_val: A numpy array of shape (N_val,) giving validation labels. - learning_rate: Scalar giving learning rate for optimization. - learning_rate_decay: Scalar giving factor used to decay the learning rate after each epoch. - reg: Scalar giving regularization strength. - num_iters: Number of steps to take when optimizing. - batch_size: Number of training examples to use per step. - verbose: boolean; if true print progress during optimization. """ num_train = X.shape[0] iterations_per_epoch = max(num_train / batch_size, 1) # Use SGD to optimize the parameters in self.model loss_history = [] train_acc_history = [] val_acc_history = [] for it in range(num_iters): X_batch = None y_batch = None ################################################################### # TODO: Create a random minibatch of training data and labels, # # storing them in X_batch and y_batch respectively. 
# ################################################################### # random indices to sample training data/labels sample_idx =
np.random.choice(num_train, batch_size, replace=True)
numpy.random.choice
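A minimal sketch of the labeled call, mirroring the minibatch sampling in the snippet above (the array shapes here are made up for illustration):
import numpy as np
num_train, batch_size = 10, 4
sample_idx = np.random.choice(num_train, batch_size, replace=True)  # 4 indices drawn from range(10), with replacement
X = np.arange(num_train * 3).reshape(num_train, 3)
y = np.arange(num_train)
X_batch, y_batch = X[sample_idx], y[sample_idx]  # fancy indexing builds the minibatch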
import numpy as np import cv2 import os import json import glob from PIL import Image, ImageDraw plate_diameter = 25 #cm plate_depth = 1.5 #cm plate_thickness = 0.2 #cm def Max(x, y): if (x >= y): return x else: return y def polygons_to_mask(img_shape, polygons): mask = np.zeros(img_shape, dtype=np.uint8) mask = Image.fromarray(mask) xy = list(map(tuple, polygons)) ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1) mask = np.array(mask, dtype=bool) return mask def mask2box(mask): index = np.argwhere(mask == 1) rows = index[:, 0] clos = index[:, 1] left_top_r = np.min(rows) left_top_c = np.min(clos) right_bottom_r = np.max(rows) right_bottom_c =
np.max(clos)
numpy.max
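A minimal sketch of the labeled call in the bounding-box context of the snippet above (the toy mask is an assumption for illustration):
import numpy as np
mask = np.zeros((5, 5), dtype=np.uint8)
mask[1:3, 2:4] = 1
index = np.argwhere(mask == 1)       # coordinates of all nonzero pixels
rows, cols = index[:, 0], index[:, 1]
print(np.min(rows), np.min(cols), np.max(rows), np.max(cols))  # bounding box: 1 2 2 3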
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) +
np.cos(5 * time)
numpy.cos
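A minimal sketch of the labeled call, building a two-tone test signal like the one in the snippet above (grid size is illustrative):
import numpy as np
t = np.linspace(0, 2 * np.pi, 1001)
signal = np.cos(t) + np.cos(5 * t)   # element-wise cosine over the grid
print(signal.shape, float(signal.max()))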
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) 
elif power == 1: assert np.all(lq ** power == lq) else: with pytest.raises(u.UnitsError): lq ** power # with dimensionless, it works, but falls back to normal quantity # (except for power=1) lq2 = u.Magnitude(np.arange(10.)) t = lq2**power if power == 0: assert t.unit is u.dimensionless_unscaled assert np.all(t.value == 1.) elif power == 1: assert np.all(t == lq2) else: assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit ** power with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(u.dimensionless_unscaled) def test_error_on_lq_as_power(self): lq = u.Magnitude(np.arange(1., 4.)*u.Jy) with pytest.raises(TypeError): lq ** lq @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) q = 1.23 * other with pytest.raises(u.UnitsError): lq + q with pytest.raises(u.UnitsError): lq - q with pytest.raises(u.UnitsError): q - lq @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_addition_subtraction(self, other): """Check that addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq + other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_sr = other + lq assert_allclose(lq_sr.physical, lq.physical * other_physical) lq_df = lq - other assert_allclose(lq_df.physical, lq.physical / other_physical) lq_dr = other - lq assert_allclose(lq_dr.physical, other_physical / lq.physical) @pytest.mark.parametrize('other', pu_sample) def test_inplace_addition_subtraction_unit_checks(self, other): lu1 = u.mag(u.Jy) lq1 = u.Magnitude(np.arange(1., 10.), lu1) with pytest.raises(u.UnitsError): lq1 += other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 with pytest.raises(u.UnitsError): lq1 -= other assert np.all(lq1.value == np.arange(1., 10.)) assert lq1.unit == lu1 @pytest.mark.parametrize( 'other', (1.23 * u.mag, 2.34 * u.mag(), u.Magnitude(3.45 * u.Jy), u.Magnitude(4.56 * u.m), 5.67 * u.Unit(2*u.mag), u.Magnitude(6.78, 2.*u.mag))) def test_inplace_addition_subtraction(self, other): """Check that inplace addition/subtraction with quantities with magnitude or MagUnit units works, and that it changes the physical units appropriately.""" lq = u.Magnitude(np.arange(1., 10.)*u.Jy) other_physical = other.to(getattr(other.unit, 'physical_unit', u.dimensionless_unscaled), equivalencies=u.logarithmic()) lq_sf = lq.copy() lq_sf += other assert_allclose(lq_sf.physical, lq.physical * other_physical) lq_df = lq.copy() lq_df -= other assert_allclose(lq_df.physical, lq.physical / other_physical) def test_complicated_addition_subtraction(self): """For fun, a more complicated example of addition and subtraction.""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) DMmag = u.mag(dm0) m_st = 10. * u.STmag dm = 5. * DMmag M_st = m_st - dm assert M_st.unit.is_equivalent(u.erg/u.s/u.AA) assert np.abs(M_st.physical / (m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) 
< 1.e-15 class TestLogQuantityComparisons(object): def test_comparison_to_non_quantities_fails(self): lq = u.Magnitude(np.arange(1., 10.)*u.Jy) # On python2, ordering operations always succeed, given essentially # meaningless results. if not six.PY2: with pytest.raises(TypeError): lq > 'a' assert not (lq == 'a') assert lq != 'a' def test_comparison(self): lq1 = u.Magnitude(np.arange(1., 4.)*u.Jy) lq2 = u.Magnitude(2.*u.Jy) assert np.all((lq1 > lq2) == np.array([True, False, False])) assert np.all((lq1 == lq2) ==
np.array([False, True, False])
numpy.array
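A minimal sketch of the labeled call as used in the comparison test above: building a boolean ndarray from a Python list.
import numpy as np
flags = np.array([False, True, False])        # list -> ndarray of dtype bool
print(flags.dtype, flags.any(), flags.all())  # bool True False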
import pandas as pd import numpy as np import matplotlib.pyplot as plt import os import matplotlib.pyplot as plt import CurveFit import shutil #find all DIRECTORIES containing non-hidden files ending in FILENAME def getDataDirectories(DIRECTORY, FILENAME="valLoss.txt"): directories=[] for directory in os.scandir(DIRECTORY): for item in os.scandir(directory): if item.name.endswith(FILENAME) and not item.name.startswith("."): directories.append(directory.path) return directories #get all non-hidden data files in DIRECTORY with extension EXT def getDataFiles(DIRECTORY, EXT='txt'): datafiles=[] for item in os.scandir(DIRECTORY): if item.name.endswith("."+EXT) and not item.name.startswith("."): datafiles.append(item.path) return datafiles #checking if loss ever doesn't decrease for numEpochs epochs in a row. def stopsDecreasing(loss, epoch, numEpochs): minLoss=np.inf epochMin=0 for i in range(0,loss.size): if loss[i] < minLoss: minLoss=loss[i] epochMin=epoch[i] elif (epoch[i]-epochMin) >= numEpochs: return i, minLoss return i, minLoss #dirpath is where the accuracy and loss files are stored. want to move the files into the same format expected by grabNNData. def createFolders(SEARCHDIR, SAVEDIR): for item in os.scandir(SEARCHDIR): name=str(item.name) files=name.split('-') SAVEFULLDIR=SAVEDIR+str(files[0]) if not os.path.exists(SAVEFULLDIR): try: os.makedirs(SAVEFULLDIR) except FileExistsError: #directory already exists--must have been created between the if statement & our attempt at making directory pass shutil.move(item.path, SAVEFULLDIR+"/"+str(files[1])) #a function to read in information (e.g. accuracy, loss) stored at FILENAME def grabNNData(FILENAME, header='infer', sep=' '): data = pd.read_csv(FILENAME, sep, header=header) if ('epochs' in data.columns) and ('trainLoss' in data.columns) and ('valLoss' in data.columns) and ('valAcc' in data.columns) and ('batch_size' in data.columns) and ('learning_rate' in data.columns): sortedData=data.sort_values(by="epochs", axis=0, ascending=True) epoch=np.array(sortedData['epochs']) trainLoss=np.array(sortedData['trainLoss']) valLoss=np.array(sortedData['valLoss']) valAcc=np.array(sortedData['valAcc']) batch_size=
np.array(sortedData['batch_size'])
numpy.array
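Here the same call is applied to a pandas column, as in the snippet above; the tiny DataFrame is an assumption for illustration.
import numpy as np
import pandas as pd
df = pd.DataFrame({'epochs': [3, 1, 2], 'valLoss': [0.4, 0.9, 0.5]})
sortedData = df.sort_values(by='epochs', axis=0, ascending=True)
epoch = np.array(sortedData['epochs'])  # copies the Series values into a plain ndarray
print(epoch)                            # [1 2 3]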
#!/usr/bin/env python # encoding: utf-8 -*- """ This module contains unit tests of the rmgpy.reaction module. """ import numpy import unittest from external.wip import work_in_progress from rmgpy.species import Species, TransitionState from rmgpy.reaction import Reaction from rmgpy.statmech.translation import Translation, IdealGasTranslation from rmgpy.statmech.rotation import Rotation, LinearRotor, NonlinearRotor, KRotor, SphericalTopRotor from rmgpy.statmech.vibration import Vibration, HarmonicOscillator from rmgpy.statmech.torsion import Torsion, HinderedRotor from rmgpy.statmech.conformer import Conformer from rmgpy.kinetics import Arrhenius from rmgpy.thermo import Wilhoit import rmgpy.constants as constants ################################################################################ class PseudoSpecies: """ Can be used in place of a :class:`rmg.species.Species` for isomorphism checks. PseudoSpecies('a') is isomorphic with PseudoSpecies('A') but nothing else. """ def __init__(self, label): self.label = label def __repr__(self): return "PseudoSpecies('{0}')".format(self.label) def __str__(self): return self.label def isIsomorphic(self, other): return self.label.lower() == other.label.lower() class TestReactionIsomorphism(unittest.TestCase): """ Contains unit tests of the isomorphism testing of the Reaction class. """ def makeReaction(self,reaction_string): """" Make a Reaction (containing PseudoSpecies) of from a string like 'Ab=CD' """ reactants, products = reaction_string.split('=') reactants = [PseudoSpecies(i) for i in reactants] products = [PseudoSpecies(i) for i in products] return Reaction(reactants=reactants, products=products) def test1to1(self): r1 = self.makeReaction('A=B') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=B'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('b=A'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('B=a'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=C'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('A=BB'))) def test1to2(self): r1 = self.makeReaction('A=BC') self.assertTrue(r1.isIsomorphic(self.makeReaction('a=Bc'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('cb=a'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('a=cb'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('bc=a'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('a=c'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=c'))) def test2to2(self): r1 = self.makeReaction('AB=CD') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cd'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=dc'),eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dc=ba'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('cd=ab'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=ab'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=cde'))) def test2to3(self): r1 = self.makeReaction('AB=CDE') self.assertTrue(r1.isIsomorphic(self.makeReaction('ab=cde'))) self.assertTrue(r1.isIsomorphic(self.makeReaction('ba=edc'),eitherDirection=False)) self.assertTrue(r1.isIsomorphic(self.makeReaction('dec=ba'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('cde=ab'),eitherDirection=False)) self.assertFalse(r1.isIsomorphic(self.makeReaction('ab=abc'))) self.assertFalse(r1.isIsomorphic(self.makeReaction('abe=cde'))) class TestReaction(unittest.TestCase): """ Contains unit tests of the Reaction class. 
""" def setUp(self): """ A method that is called prior to each unit test in this class. """ ethylene = Species( label = 'C2H4', conformer = Conformer( E0 = (44.7127, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (28.0313, 'amu'), ), NonlinearRotor( inertia = ( [3.41526, 16.6498, 20.065], 'amu*angstrom^2', ), symmetry = 4, ), HarmonicOscillator( frequencies = ( [828.397, 970.652, 977.223, 1052.93, 1233.55, 1367.56, 1465.09, 1672.25, 3098.46, 3111.7, 3165.79, 3193.54], 'cm^-1', ), ), ], spinMultiplicity = 1, opticalIsomers = 1, ), ) hydrogen = Species( label = 'H', conformer = Conformer( E0 = (211.794, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (1.00783, 'amu'), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) ethyl = Species( label = 'C2H5', conformer = Conformer( E0 = (111.603, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [4.8709, 22.2353, 23.9925], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [482.224, 791.876, 974.355, 1051.48, 1183.21, 1361.36, 1448.65, 1455.07, 1465.48, 2688.22, 2954.51, 3033.39, 3101.54, 3204.73], 'cm^-1', ), ), HinderedRotor( inertia = (1.11481, 'amu*angstrom^2'), symmetry = 6, barrier = (0.244029, 'kJ/mol'), semiclassical = None, ), ], spinMultiplicity = 2, opticalIsomers = 1, ), ) TS = TransitionState( label = 'TS', conformer = Conformer( E0 = (266.694, 'kJ/mol'), modes = [ IdealGasTranslation( mass = (29.0391, 'amu'), ), NonlinearRotor( inertia = ( [6.78512, 22.1437, 22.2114], 'amu*angstrom^2', ), symmetry = 1, ), HarmonicOscillator( frequencies = ( [412.75, 415.206, 821.495, 924.44, 982.714, 1024.16, 1224.21, 1326.36, 1455.06, 1600.35, 3101.46, 3110.55, 3175.34, 3201.88], 'cm^-1', ), ), ], spinMultiplicity = 2, opticalIsomers = 1, ), frequency = (-750.232, 'cm^-1'), ) self.reaction = Reaction( reactants = [hydrogen, ethylene], products = [ethyl], kinetics = Arrhenius( A = (501366000.0, 'cm^3/(mol*s)'), n = 1.637, Ea = (4.32508, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2500, 'K'), ), transitionState = TS, ) # CC(=O)O[O] acetylperoxy = Species( label='acetylperoxy', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(21.0*constants.R,"J/(mol*K)"), a0=-3.95, a1=9.26, a2=-15.6, a3=8.55, B=(500.0,"K"), H0=(-6.151e+04,"J/mol"), S0=(-790.2,"J/(mol*K)")), ) # C[C]=O acetyl = Species( label='acetyl', thermo=Wilhoit(Cp0=(4.0*constants.R,"J/(mol*K)"), CpInf=(15.5*constants.R,"J/(mol*K)"), a0=0.2541, a1=-0.4712, a2=-4.434, a3=2.25, B=(500.0,"K"), H0=(-1.439e+05,"J/mol"), S0=(-524.6,"J/(mol*K)")), ) # [O][O] oxygen = Species( label='oxygen', thermo=Wilhoit(Cp0=(3.5*constants.R,"J/(mol*K)"), CpInf=(4.5*constants.R,"J/(mol*K)"), a0=-0.9324, a1=26.18, a2=-70.47, a3=44.12, B=(500.0,"K"), H0=(1.453e+04,"J/mol"), S0=(-12.19,"J/(mol*K)")), ) self.reaction2 = Reaction( reactants=[acetyl, oxygen], products=[acetylperoxy], kinetics = Arrhenius( A = (2.65e12, 'cm^3/(mol*s)'), n = 0.0, Ea = (0.0, 'kJ/mol'), T0 = (1, 'K'), Tmin = (300, 'K'), Tmax = (2000, 'K'), ), ) def testIsIsomerization(self): """ Test the Reaction.isIsomerization() method. 
""" isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertTrue(isomerization.isIsomerization()) self.assertFalse(association.isIsomerization()) self.assertFalse(dissociation.isIsomerization()) self.assertFalse(bimolecular.isIsomerization()) def testIsAssociation(self): """ Test the Reaction.isAssociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isAssociation()) self.assertTrue(association.isAssociation()) self.assertFalse(dissociation.isAssociation()) self.assertFalse(bimolecular.isAssociation()) def testIsDissociation(self): """ Test the Reaction.isDissociation() method. """ isomerization = Reaction(reactants=[Species()], products=[Species()]) association = Reaction(reactants=[Species(),Species()], products=[Species()]) dissociation = Reaction(reactants=[Species()], products=[Species(),Species()]) bimolecular = Reaction(reactants=[Species(),Species()], products=[Species(),Species()]) self.assertFalse(isomerization.isDissociation()) self.assertFalse(association.isDissociation()) self.assertTrue(dissociation.isDissociation()) self.assertFalse(bimolecular.isDissociation()) def testHasTemplate(self): """ Test the Reaction.hasTemplate() method. """ reactants = self.reaction.reactants[:] products = self.reaction.products[:] self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertTrue(self.reaction.hasTemplate(reactants, products)) self.assertTrue(self.reaction.hasTemplate(products, reactants)) self.assertFalse(self.reaction2.hasTemplate(reactants, products)) self.assertFalse(self.reaction2.hasTemplate(products, reactants)) reactants = self.reaction2.reactants[:] products = self.reaction2.products[:] self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) reactants.reverse() products.reverse() self.assertFalse(self.reaction.hasTemplate(reactants, products)) self.assertFalse(self.reaction.hasTemplate(products, reactants)) self.assertTrue(self.reaction2.hasTemplate(reactants, products)) self.assertTrue(self.reaction2.hasTemplate(products, reactants)) def testEnthalpyOfReaction(self): """ Test the Reaction.getEnthalpyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Hlist0 = [float(v) for v in ['-146007', '-145886', '-144195', '-141973', '-139633', '-137341', '-135155', '-133093', '-131150', '-129316']] Hlist = self.reaction2.getEnthalpiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Hlist[i] / 1000., Hlist0[i] / 1000., 2) def testEntropyOfReaction(self): """ Test the Reaction.getEntropyOfReaction() method. 
""" Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Slist0 = [float(v) for v in ['-156.793', '-156.872', '-153.504', '-150.317', '-147.707', '-145.616', '-143.93', '-142.552', '-141.407', '-140.441']] Slist = self.reaction2.getEntropiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Slist[i], Slist0[i], 2) def testFreeEnergyOfReaction(self): """ Test the Reaction.getFreeEnergyOfReaction() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Glist0 = [float(v) for v in ['-114648', '-83137.2', '-52092.4', '-21719.3', '8073.53', '37398.1', '66346.8', '94990.6', '123383', '151565']] Glist = self.reaction2.getFreeEnergiesOfReaction(Tlist) for i in range(len(Tlist)): self.assertAlmostEqual(Glist[i] / 1000., Glist0[i] / 1000., 2) def testEquilibriumConstantKa(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kalist0 = [float(v) for v in ['8.75951e+29', '7.1843e+10', '34272.7', '26.1877', '0.378696', '0.0235579', '0.00334673', '0.000792389', '0.000262777', '0.000110053']] Kalist = self.reaction2.getEquilibriumConstants(Tlist, type='Ka') for i in range(len(Tlist)): self.assertAlmostEqual(Kalist[i] / Kalist0[i], 1.0, 4) def testEquilibriumConstantKc(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kclist0 = [float(v) for v in ['1.45661e+28', '2.38935e+09', '1709.76', '1.74189', '0.0314866', '0.00235045', '0.000389568', '0.000105413', '3.93273e-05', '1.83006e-05']] Kclist = self.reaction2.getEquilibriumConstants(Tlist, type='Kc') for i in range(len(Tlist)): self.assertAlmostEqual(Kclist[i] / Kclist0[i], 1.0, 4) def testEquilibriumConstantKp(self): """ Test the Reaction.getEquilibriumConstant() method. """ Tlist = numpy.arange(200.0, 2001.0, 200.0, numpy.float64) Kplist0 = [float(v) for v in ['8.75951e+24', '718430', '0.342727', '0.000261877', '3.78696e-06', '2.35579e-07', '3.34673e-08', '7.92389e-09', '2.62777e-09', '1.10053e-09']] Kplist = self.reaction2.getEquilibriumConstants(Tlist, type='Kp') for i in range(len(Tlist)): self.assertAlmostEqual(Kplist[i] / Kplist0[i], 1.0, 4) def testStoichiometricCoefficient(self): """ Test the Reaction.getStoichiometricCoefficient() method. """ for reactant in self.reaction.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), -1) for product in self.reaction.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 1) for reactant in self.reaction2.reactants: self.assertEqual(self.reaction.getStoichiometricCoefficient(reactant), 0) for product in self.reaction2.products: self.assertEqual(self.reaction.getStoichiometricCoefficient(product), 0) def testRateCoefficient(self): """ Test the Reaction.getRateCoefficient() method. """ Tlist =
numpy.arange(200.0, 2001.0, 200.0, numpy.float64)
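The three classification tests above only constrain molecularity: one reactant to one product is an isomerization, two-to-one an association, one-to-two a dissociation. A minimal sketch of predicates that satisfy exactly those assertions (a hypothetical stand-in, not the actual rmgpy Reaction implementation):

class MinimalReaction:
    """Hypothetical stand-in: classifies a reaction by reactant/product counts only."""

    def __init__(self, reactants=(), products=()):
        self.reactants = list(reactants)
        self.products = list(products)

    def isIsomerization(self):
        # A -> B: exactly one reactant and one product
        return len(self.reactants) == 1 and len(self.products) == 1

    def isAssociation(self):
        # A + B -> C: multiple reactants combine into a single product
        return len(self.reactants) > 1 and len(self.products) == 1

    def isDissociation(self):
        # A -> B + C: a single reactant splits into multiple products
        return len(self.reactants) == 1 and len(self.products) > 1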
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum


def random_spherical_vector():
    # This method of calculating isotropic vectors is taken from the GNU Scientific Library
    LOOP = True
    while LOOP:
        x = -1. + 2. * np.random.uniform()
        y = -1. + 2. * np.random.uniform()
        s = x**2 + y**2
        if s <= 1.0:
            LOOP = False

    z = -1. + 2. * s
    a = 2 * np.sqrt(1 - s)
    x = a * x
    y = a * y
    return np.array([x, y, z])


class SimpleSource(object):
    """A light source that will generate photons of a single colour, direction and position."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, use_random_polarisation=False):
        super(SimpleSource, self).__init__()
        self.position = position
        self.direction = direction
        self.wavelength = wavelength
        self.use_random_polarisation = use_random_polarisation
        self.throw = 0
        self.source_id = "SimpleSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.position = np.array(self.position)
        photon.direction = np.array(self.direction)
        photon.active = True
        photon.wavelength = self.wavelength

        # If use_random_polarisation is set, generate a random polarisation vector for the photon
        if self.use_random_polarisation:
            # Randomise the rotation angle around the xy-plane, then transform from +z to the direction of the photon
            vec = random_spherical_vector()
            vec[2] = 0.
            vec = norm(vec)
            R = rotation_matrix_from_vector_alignment(self.direction, [0, 0, 1])
            photon.polarisation = transform_direction(vec, R)
        else:
            photon.polarisation = None

        photon.id = self.throw
        self.throw = self.throw + 1
        return photon


class Laser(object):
    """A light source that will generate photons of a single colour, direction, position and polarisation."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, polarisation=None):
        super(Laser, self).__init__()
        self.position = np.array(position)
        self.direction = np.array(direction)
        self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
        self.polarisation = np.array(polarisation)
        self.throw = 0
        self.source_id = "LaserSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.position =
np.array(self.position)
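random_spherical_vector above uses Marsaglia's rejection method as implemented in the GNU Scientific Library: sample (x, y) uniformly in the unit disk, then lift onto the sphere with z = -1 + 2s and a scale factor of 2*sqrt(1 - s), where s = x^2 + y^2. A quick standalone check (new code, not part of pvtrace) that this algebra really yields isotropic unit vectors:

import numpy as np

def marsaglia_direction():
    # Rejection-sample a point in the unit disk ...
    while True:
        x = -1. + 2. * np.random.uniform()
        y = -1. + 2. * np.random.uniform()
        s = x ** 2 + y ** 2
        if s <= 1.0:
            break
    # ... then map it to the sphere: norm is 4s(1-s) + (2s-1)^2 = 1 exactly.
    a = 2 * np.sqrt(1 - s)
    return np.array([a * x, a * y, -1. + 2. * s])

vecs = np.array([marsaglia_direction() for _ in range(10000)])
assert np.allclose(np.linalg.norm(vecs, axis=1), 1.0)   # all unit length
assert abs(vecs.mean(axis=0)).max() < 0.05              # no preferred direction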
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=
np.ones(10)
numpy.ones
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time =
np.linspace(0, 2 * np.pi, 1001)
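The extrema masks that drive the first figure (max_bool_func_1st_order_fd / min_bool_func_1st_order_fd) come from emd_utils.Utility, whose implementation is not shown here. As the names suggest, a first-order finite-difference sign-change test suffices; a minimal sketch under that assumption, with a hypothetical helper name:

import numpy as np

def max_bool_1st_order_fd(ts):
    # Interior maxima: the first difference flips from positive to negative.
    d = np.diff(ts)
    mask = np.zeros(len(ts), dtype=bool)
    mask[1:-1] = (d[:-1] > 0) & (d[1:] < 0)
    return mask

t = np.linspace(0, 2 * np.pi, 1001)
x = np.sin(t) + np.sin(5 * t)
print(t[max_bool_1st_order_fd(x)])   # abscissae of the maxima M(t_i)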
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = 
len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Test the IID parameter TODO: Clearly this test does something else??? # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. 
/ 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) for refit in (True, False): random_searches = [] for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'): # If True, for multi-metric pass refit='accuracy' if refit: probability = True refit = 'accuracy' if isinstance(scoring, tuple) else refit else: probability = False clf = SVC(probability=probability, random_state=42) random_search = RandomizedSearchCV(clf, n_iter=n_search_iter, cv=n_splits, param_distributions=params, scoring=scoring, refit=refit, random_state=0) random_search.fit(X, y) random_searches.append(random_search) compare_cv_results_multimetric_with_single(*random_searches) compare_refit_methods_when_refit_with_acc( random_searches[0], random_searches[1], refit) def compare_cv_results_multimetric_with_single( search_multi, search_acc, search_rec): """Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search""" assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) cv_results_multi = search_multi.cv_results_ cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v for k, v in search_acc.cv_results_.items()} cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v for k, v in search_rec.cv_results_.items()}) # Check if score and timing are reasonable, also checks if the keys # are present assert all((np.all(cv_results_multi[k] <= 1) for k in ( 'mean_score_time', 'std_score_time', 'mean_fit_time', 'std_fit_time'))) # Compare the keys, other than time keys, among multi-metric and # single metric grid search results. 
np.testing.assert_equal performs a # deep nested comparison of the two cv_results dicts np.testing.assert_equal({k: v for k, v in cv_results_multi.items() if not k.endswith('_time')}, {k: v for k, v in cv_results_acc_rec.items() if not k.endswith('_time')}) def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): """Compare refit multi-metric search methods with single metric methods""" assert search_acc.refit == refit if refit: assert search_multi.refit == 'accuracy' else: assert not search_multi.refit return # search cannot predict/score without refit X, y = make_blobs(n_samples=100, n_features=4, random_state=42) for method in ('predict', 'predict_proba', 'predict_log_proba'): assert_almost_equal(getattr(search_multi, method)(X), getattr(search_acc, method)(X)) assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) for key in ('best_index_', 'best_score_', 'best_params_'): assert getattr(search_multi, key) == getattr(search_acc, key) def test_search_cv_results_rank_tie_breaking(): X, y = make_blobs(n_samples=50, random_state=42) # The two C values are close enough to give similar models # which would result in a tie of their mean cv-scores param_grid = {'C': [1, 1.001, 0.001]} grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) random_search = RandomizedSearchCV(SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True) for search in (grid_search, random_search): search.fit(X, y) cv_results = search.cv_results_ # Check tie breaking strategy - # Check that there is a tie in the mean scores between # candidates 1 and 2 alone assert_almost_equal(cv_results['mean_test_score'][0], cv_results['mean_test_score'][1]) assert_almost_equal(cv_results['mean_train_score'][0], cv_results['mean_train_score'][1]) assert not np.allclose(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2]) assert not np.allclose(cv_results['mean_train_score'][1], cv_results['mean_train_score'][2]) # 'min' rank should be assigned to the tied candidates assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3]) def test_search_cv_results_none_param(): X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) est_parameters = {"random_state": [0, None]} cv = KFold() for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv, ).fit(X, y) assert_array_equal(grid_search.cv_results_['param_random_state'], [0, None]) @ignore_warnings() def test_search_cv_timing(): svc = LinearSVC(random_state=0) X = [[1, ], [2, ], [3, ], [4, ]] y = [0, 1, 1, 0] gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0) rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2) for search in (gs, rs): search.fit(X, y) for key in ['mean_fit_time', 'std_fit_time']: # NOTE The precision of time.time in windows is not high # enough for the fit/score times to be non-zero for trivial X and y assert np.all(search.cv_results_[key] >= 0) assert np.all(search.cv_results_[key] < 1) for key in ['mean_score_time', 'std_score_time']: assert search.cv_results_[key][1] >= 0 assert search.cv_results_[key][0] == 0.0 assert np.all(search.cv_results_[key] < 1) assert hasattr(search, "refit_time_") assert isinstance(search.refit_time_, float) assert search.refit_time_ >= 0 def test_grid_search_correct_score_results(): # test that correct scores are used n_splits = 3 clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in 
['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits) cv_results = grid_search.fit(X, y).cv_results_ # Test scorer names result_keys = list(cv_results.keys()) expected_keys = (("mean_test_score", "rank_test_score") + tuple("split%d_test_score" % cv_i for cv_i in range(n_splits))) assert all(np.in1d(expected_keys, result_keys)) cv = StratifiedKFold(n_splits=n_splits) n_splits = grid_search.n_splits_ for candidate_i, C in enumerate(Cs): clf.set_params(C=C) cv_scores = np.array( list(grid_search.cv_results_['split%d_test_score' % s][candidate_i] for s in range(n_splits))) for i, (train, test) in enumerate(cv.split(X, y)): clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, cv_scores[i]) # FIXME remove test_fit_grid_point as the function will be removed on 0.25 @ignore_warnings(category=FutureWarning) def test_fit_grid_point(): X, y = make_classification(random_state=0) cv = StratifiedKFold() svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) for params in ({'C': 0.1}, {'C': 0.01}, {'C': 0.001}): for train, test in cv.split(X, y): this_scores, this_params, n_test_samples = fit_grid_point( X, y, clone(svc), params, train, test, scorer, verbose=False) est = clone(svc).set_params(**params) est.fit(X[train], y[train]) expected_score = scorer(est, X[test], y[test]) # Test the return values of fit_grid_point assert_almost_equal(this_scores, expected_score) assert params == this_params assert n_test_samples == test.size # Should raise an error upon multimetric scorer assert_raise_message(ValueError, "For evaluating multiple scores, use " "sklearn.model_selection.cross_validate instead.", fit_grid_point, X, y, svc, params, train, test, {'score': scorer}, verbose=True) # FIXME remove test_fit_grid_point_deprecated as # fit_grid_point will be removed on 0.25 def test_fit_grid_point_deprecated(): X, y = make_classification(random_state=0) svc = LinearSVC(random_state=0) scorer = make_scorer(accuracy_score) msg = ("fit_grid_point is deprecated in version 0.23 " "and will be removed in version 0.25") params = {'C': 0.1} train, test = next(StratifiedKFold().split(X, y)) with pytest.warns(FutureWarning, match=msg): fit_grid_point(X, y, svc, params, train, test, scorer, verbose=False) def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, cv=3) grid_search.fit(X, y) grid_search_pickled = pickle.loads(pickle.dumps(grid_search)) assert_array_almost_equal(grid_search.predict(X), grid_search_pickled.predict(X)) random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, n_iter=3, cv=3) random_search.fit(X, y) random_search_pickled = pickle.loads(pickle.dumps(random_search)) assert_array_almost_equal(random_search.predict(X), random_search_pickled.predict(X)) def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(return_indicator=True, random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold() estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) res_params = grid_search.cv_results_['params'] for cand_i in 
range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, grid_search.cv_results_['split%d_test_score' % i][cand_i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) res_params = random_search.cv_results_['params'] for cand_i in range(len(res_params)): est.set_params(**res_params[cand_i]) for i, (train, test) in enumerate(cv.split(X, y)): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal( correct_score, random_search.cv_results_['split%d_test_score' % i][cand_i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X =
np.arange(20)
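test_parameter_grid above pins down the ParameterGrid contract: the grid length is the product of the per-key value counts, indexing agrees with iteration order, and an empty dict yields exactly one empty candidate. A compact illustration against the public sklearn API:

from sklearn.model_selection import ParameterGrid

grid = ParameterGrid({"foo": [4, 2], "bar": ["ham", "spam", "eggs"]})
assert len(grid) == 2 * 3                                # product of value counts
assert list(grid) == [grid[i] for i in range(len(grid))]  # getitem matches iteration
assert list(ParameterGrid({})) == [{}]                   # empty grid -> default settings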
import os import numpy as np import pandas as pd from keras.utils import to_categorical from sklearn.model_selection import KFold, train_test_split def load_data(path): train = pd.read_json(os.path.join(path, "./train.json")) test = pd.read_json(os.path.join(path, "./test.json")) return (train, test) def preprocess(df, means=(-22.159262, -24.953745, 40.021883465782651), stds=(5.33146, 4.5463958, 4.0815391476694414)): X_band_1 = np.array([
np.array(band)
numpy.array
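A minimal sketch of how this row's completion fits the preprocess function; the two-image stand-in for df["band_1"] and the 75x75 reshape are assumptions for illustration, not taken from the recorded row.

import numpy as np

# Hypothetical stand-in for df["band_1"]: two flattened 75x75 images.
bands = [[0.1] * 5625, [0.2] * 5625]
# The recorded completion wraps each band in numpy.array inside the comprehension:
X_band_1 = np.array([np.array(band).reshape(75, 75) for band in bands])
print(X_band_1.shape)  # (2, 75, 75)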
import numpy as np from keras.applications.inception_v3 import InceptionV3 from keras.initializers import RandomNormal from keras.layers import (BatchNormalization, Conv2D, Conv2DTranspose, Conv3D, Cropping2D, Dense, Flatten, GlobalAveragePooling2D, Input, Lambda, MaxPooling2D, Reshape, UpSampling2D, ZeroPadding2D, ZeroPadding3D, add, concatenate) from keras.layers.advanced_activations import ELU, LeakyReLU from keras.models import Model # Parameterized 2D Block Model def BlockModel2D(input_shape, filt_num=16, numBlocks=3): """Creates a Block CED model for segmentation problems Args: input_shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An uninitialized Keras model Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize = np.max(input_shape[0:2]) minsize = (startsize-np.sum(2**np.arange(1, numBlocks+1)))/2**numBlocks assert minsize > 4, "Too small of input for this many blocks. Use fewer blocks or larger input" # input layer lay_input = Input(shape=input_shape, name='input_layer') # contracting blocks x = lay_input skip_list = [] for rr in range(1, numBlocks+1): x1 = Conv2D(filt_num*rr, (1, 1), padding='same', name='Conv1_{}'.format(rr))(x) if use_bn: x1 = BatchNormalization()(x1) x1 = ELU(name='elu_x1_{}'.format(rr))(x1) x3 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv3_{}'.format(rr))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_x3_{}'.format(rr))(x3) x51 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv51_{}'.format(rr))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_x51_{}'.format(rr))(x51) x52 = Conv2D(filt_num*rr, (3, 3), padding='same', name='Conv52_{}'.format(rr))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_x52_{}'.format(rr))(x52) x = concatenate([x1, x3, x52], name='merge_{}'.format(rr)) x = Conv2D(filt_num*rr, (1, 1), padding='valid', name='ConvAll_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_all_{}'.format(rr))(x) x = ZeroPadding2D(padding=(1, 1), name='PrePad_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (4, 4), padding='valid', strides=(2, 2), name='DownSample_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_downsample_{}'.format(rr))(x) x = Conv2D(filt_num*rr, (3, 3), padding='same', name='ConvClean_{}'.format(rr))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_clean_{}'.format(rr))(x) skip_list.append(x) # expanding blocks expnums = list(range(1, numBlocks+1)) expnums.reverse() for dd in expnums: if dd < len(skip_list): x = concatenate([skip_list[dd-1], x], name='skip_connect_{}'.format(dd)) x1 = Conv2D(filt_num*dd, (1, 1), padding='same', name='DeConv1_{}'.format(dd))(x) if use_bn: x1 =
BatchNormalization()(x1) x1 = ELU(name='elu_Dx1_{}'.format(dd))(x1) x3 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv3_{}'.format(dd))(x) if use_bn: x3 = BatchNormalization()(x3) x3 = ELU(name='elu_Dx3_{}'.format(dd))(x3) x51 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv51_{}'.format(dd))(x) if use_bn: x51 = BatchNormalization()(x51) x51 = ELU(name='elu_Dx51_{}'.format(dd))(x51) x52 = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConv52_{}'.format(dd))(x51) if use_bn: x52 = BatchNormalization()(x52) x52 = ELU(name='elu_Dx52_{}'.format(dd))(x52) x = concatenate([x1, x3, x52], name='Dmerge_{}'.format(dd)) x = Conv2D(filt_num*dd, (1, 1), padding='valid', name='DeConvAll_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dall_{}'.format(dd))(x) x = UpSampling2D(size=(2, 2), name='UpSample_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean1_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean1_{}'.format(dd))(x) x = Conv2D(filt_num*dd, (3, 3), padding='same', name='DeConvClean2_{}'.format(dd))(x) if use_bn: x = BatchNormalization()(x) x = ELU(name='elu_Dclean2_{}'.format(dd))(x) # classifier lay_out = Conv2D(1, (1, 1), activation='sigmoid', name='output_layer')(x) return Model(lay_input, lay_out) # Parameterized 2D Block Model def BlockModel_Classifier(input_shape, filt_num=16, numBlocks=3): """Creates a Block model for pretraining on classification task Args: input_shape: a list or tuple of [rows,cols,channels] of input images filt_num: the number of filters in the first and last layers This number is linearly increased and decreased throughout the model numBlocks: number of processing blocks. The larger the number the deeper the model output_chan: number of output channels. Set if doing multi-class segmentation regression: Whether to have a continuous output with linear activation Returns: An uninitialized Keras model Example usage: SegModel = BlockModel2D([256,256,1],filt_num=8) Notes: Using rows/cols that are powers of 2 is recommended. Otherwise, the rows/cols must be divisible by 2^numBlocks for skip connections to match up properly """ use_bn = True # check for input shape compatibility rows, cols = input_shape[0:2] assert rows % 2**numBlocks == 0, "Input rows and number of blocks are incompatible" assert cols % 2**numBlocks == 0, "Input cols and number of blocks are incompatible" # calculate size reduction startsize =
np.max(input_shape[0:2])
numpy.max
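The completion picks the larger spatial dimension as the starting size; the size-reduction check quoted from the function body then evaluates as in this sketch (the example input shape is assumed).

import numpy as np

input_shape = (256, 320, 1)           # example rows, cols, channels (assumed)
numBlocks = 3
startsize = np.max(input_shape[0:2])  # 320 -- the row's numpy.max completion
# The assertion from the source then checks the post-contraction size:
minsize = (startsize - np.sum(2 ** np.arange(1, numBlocks + 1))) / 2 ** numBlocks
print(startsize, minsize)             # 320 38.25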
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two sessions and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input.
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype) strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join( f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): """ Test transforming matrix columns to virtual file dataset. 
""" dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T] ) expected = "{}\n".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): """ Test the function fails for arrays of different sizes. """ x = np.arange(5) y = np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print("This should have failed") def test_virtualfile_from_matrix(): """ Test transforming a matrix to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): """ Test transforming a slice of a larger array to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (10, 6) for dtype in dtypes: full_data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) rows = 5 cols = 3 data = full_data[:rows, :cols] with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(rows, bounds) assert output == expected def test_virtualfile_from_vectors_pandas(): """ Pass vectors to a dataset using pandas Series. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 13 for dtype in dtypes: data = pd.DataFrame( data=dict( x=np.arange(size, dtype=dtype), y=np.arange(size, size * 2, 1, dtype=dtype), z=np.arange(size * 2, size * 3, 1, dtype=dtype), ) ) with clib.Session() as lib: with lib.virtualfile_from_vectors(data.x, data.y, data.z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( [ "<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (data.x, data.y, data.z) ] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected def test_virtualfile_from_vectors_arraylike(): """ Pass array-like vectors to a dataset. 
""" size = 13 x = list(range(0, size, 1)) y = tuple(range(size, size * 2, 1)) z = range(size * 2, size * 3, 1) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(min(i), max(i)) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected def test_extract_region_fails(): """ Check that extract region fails if nothing has been plotted. """ Figure() with pytest.raises(GMTCLibError): with clib.Session() as lib: lib.extract_region() def test_extract_region_two_figures(): """ Extract region should handle multiple figures existing at the same time. """ # Make two figures before calling extract_region to make sure that it's # getting from the current figure, not the last figure. fig1 = Figure() region1 = np.array([0, 10, -20, -10]) fig1.coast(region=region1, projection="M6i", frame=True, land="black") fig2 = Figure() fig2.basemap(region="US.HI+r5", projection="M6i", frame=True) # Activate the first figure and extract the region from it # Use in a different session to avoid any memory problems. with clib.Session() as lib: lib.call_module("figure", "{} -".format(fig1._name)) with clib.Session() as lib: wesn1 = lib.extract_region() npt.assert_allclose(wesn1, region1) # Now try it with the second one with clib.Session() as lib: lib.call_module("figure", "{} -".format(fig2._name)) with clib.Session() as lib: wesn2 = lib.extract_region() npt.assert_allclose(wesn2,
np.array([-165.0, -150.0, 15.0, 25.0])
numpy.array
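The completion is the expected [west, east, south, north] array for the second figure's "US.HI+r5" region; a runnable sketch with a stand-in for the extracted region:

import numpy as np
import numpy.testing as npt

wesn2 = np.array([-165.0, -150.0, 15.0, 25.0])  # stand-in for lib.extract_region()
npt.assert_allclose(wesn2, np.array([-165.0, -150.0, 15.0, 25.0]))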
# -*- encoding:utf-8 -*- # @Time : 2021/1/3 15:15 # @Author : gfjiang import os.path as osp import mmcv import numpy as np import cvtools import matplotlib.pyplot as plt import cv2.cv2 as cv from functools import partial import torch import math from cvtools.utils.path import add_prefix_filename_suffix from mmdet.ops import nms from mmdet.apis import init_detector, inference_detector def draw_features(module, input, output, work_dir='./'): x = output.cpu().numpy() out_channels = list(output.shape)[1] height = int(math.sqrt(out_channels)) width = height if list(output.shape)[2] < 128: return fig = plt.figure(figsize=(32, 32)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05) for i in range(height * width): plt.subplot(height, width, i + 1) plt.axis('off') img = x[0, i, :, :] pmin = np.min(img) pmax = np.max(img) img = ((img - pmin) / (pmax - pmin + 0.000001))*255 # floats are in [0, 1]; scale to 0-255 img = img.astype(np.uint8) # convert to uint8 img = cv.applyColorMap(img, cv.COLORMAP_JET) # generate a heat map img = img[:, :, ::-1] # note that cv2 (BGR) and matplotlib (RGB) channel orders are reversed plt.imshow(img) # print("{}/{}".format(i,width*height)) savename = get_image_name_for_hook(module, work_dir) fig.savefig(savename, dpi=100) fig.clf() plt.close() def get_image_name_for_hook(module, work_dir='./'): """ Generate image filename for hook function Parameters: ----------- module: module of neural network """ # os.makedirs(work_dir, exist_ok=True) module_name = str(module) base_name = module_name.split('(')[0] index = 0 image_name = '.' # '.' always exists, so the first loop condition is True while osp.exists(image_name): index += 1 image_name = osp.join( work_dir, 'feats', '%s_%d.png' % (base_name, index)) return image_name class AerialDetectionOBB(object): def __init__(self, config, pth): self.imgs = [] self.cfg = mmcv.Config.fromfile(config) self.pth = pth print('loading model {} ...'.format(pth)) self.model = init_detector(self.cfg, self.pth, device='cuda:0') self.results = [] self.img_detected = [] # self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d)) def __call__(self, imgs_or_path, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): if isinstance(imgs_or_path, str): self.imgs += cvtools.get_files_list(imgs_or_path) else: self.imgs += imgs_or_path prog_bar = mmcv.ProgressBar(len(self.imgs)) for _, img in enumerate(self.imgs): self.detect(img, det_thrs=det_thrs, vis=vis, vis_thr=vis_thr, save_root=save_root) prog_bar.update() def detect(self, img, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): result = inference_detector(self.model, img) # result = self.nms(result) if isinstance(det_thrs, float): det_thrs = [det_thrs] * len(result) if vis: to_file = osp.join(save_root, osp.basename(img)) to_file = add_prefix_filename_suffix(to_file, suffix='_obb') self.vis(img, result, vis_thr=vis_thr, to_file=to_file) result = [det[det[..., -1] > det_thr] for det, det_thr in zip(result, det_thrs)] if len(result) == 0: print('detect: image {} has no object.'.format(img)) self.img_detected.append(img) self.results.append(result) return result def nms(self, result, nms_th=0.3): dets_num = [len(det_cls) for det_cls in result] result = np.vstack(result) _, ids = nms(result, nms_th) total_num = 0 nms_result = [] for num in dets_num: ids_cls = ids[np.where((total_num <= ids) & (ids < num))[0]] nms_result.append(result[ids_cls]) total_num += num return nms_result def vis(self, img, bbox_result, vis_thr=0.5, to_file='vis.jpg'): bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i,
bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) inds =
np.where(bboxes[:, -1] > vis_thr)
numpy.where
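The completion filters detections by confidence; in this sketch the box layout (x1, y1, x2, y2, score) and the trailing [0] index are assumptions based on the surrounding vis() code, not part of the recorded completion.

import numpy as np

bboxes = np.array([[0., 0., 10., 10., 0.9],
                   [5., 5., 20., 20., 0.3]])  # x1, y1, x2, y2, score (assumed)
vis_thr = 0.5
inds = np.where(bboxes[:, -1] > vis_thr)[0]   # keep rows above the threshold
print(inds, bboxes[inds].shape)               # [0] (1, 5)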
import numpy as np import pytest import theano import theano.tensor as tt # Don't import test classes otherwise they get tested as part of the file from tests import unittest_tools as utt from tests.gpuarray.config import mode_with_gpu, mode_without_gpu, test_ctx_name from tests.tensor.test_basic import ( TestAlloc, TestComparison, TestJoinAndSplit, TestReshape, ) from tests.tensor.utils import rand, safe_make_node from theano.gpuarray.basic_ops import ( GpuAlloc, GpuAllocEmpty, GpuContiguous, GpuEye, GpuFromHost, GpuJoin, GpuReshape, GpuSplit, GpuToGpu, GpuTri, HostFromGpu, gpu_contiguous, gpu_join, host_from_gpu, ) from theano.gpuarray.elemwise import GpuDimShuffle, GpuElemwise from theano.gpuarray.subtensor import GpuSubtensor from theano.gpuarray.type import GpuArrayType, get_context, gpuarray_shared_constructor from theano.tensor import TensorType from theano.tensor.basic import alloc pygpu = pytest.importorskip("pygpu") gpuarray = pygpu.gpuarray utt.seed_rng() rng = np.random.RandomState(seed=utt.fetch_seed()) def inplace_func( inputs, outputs, mode=None, allow_input_downcast=False, on_unused_input="raise", name=None, ): if mode is None: mode = mode_with_gpu return theano.function( inputs, outputs, mode=mode, allow_input_downcast=allow_input_downcast, accept_inplace=True, on_unused_input=on_unused_input, name=name, ) def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs): from theano.tensor.sharedvar import scalar_constructor, tensor_constructor for c in (gpuarray_shared_constructor, tensor_constructor, scalar_constructor): try: return c( value, name=name, strict=strict, allow_downcast=allow_downcast, **kwargs ) except TypeError: continue def rand_gpuarray(*shape, **kwargs): r = rng.rand(*shape) * 2 - 1 dtype = kwargs.pop("dtype", theano.config.floatX) cls = kwargs.pop("cls", None) if len(kwargs) != 0: raise TypeError("Unexpected argument %s", list(kwargs.keys())[0]) return gpuarray.array(r, dtype=dtype, cls=cls, context=get_context(test_ctx_name)) def makeTester( name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu, mode_nogpu=mode_without_gpu, skip=False, eps=1e-10, ): if checks is None: checks = {} _op = op _gpu_op = gpu_op _cases = cases _skip = skip _checks = checks class Checker(utt.OptimizationTestMixin): op = staticmethod(_op) gpu_op = staticmethod(_gpu_op) cases = _cases skip = _skip checks = _checks def setup_method(self): eval(self.__class__.__module__ + "." 
+ self.__class__.__name__) def test_all(self): if skip: pytest.skip(skip) for testname, inputs in cases.items(): for _ in range(len(inputs)): if type(inputs[_]) is float: inputs[_] = np.asarray(inputs[_], dtype=theano.config.floatX) self.run_case(testname, inputs) def run_case(self, testname, inputs): inputs_ref = [theano.shared(inp) for inp in inputs] inputs_tst = [theano.shared(inp) for inp in inputs] try: node_ref = safe_make_node(self.op, *inputs_ref) node_tst = safe_make_node(self.op, *inputs_tst) except Exception as exc: err_msg = ( "Test %s::%s: Error occurred while making " "a node with inputs %s" ) % (self.gpu_op, testname, inputs) exc.args += (err_msg,) raise try: f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu) f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu) except Exception as exc: err_msg = ( "Test %s::%s: Error occurred while trying to " "make a Function" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise self.assertFunctionContains1(f_tst, self.gpu_op) ref_e = None try: expecteds = f_ref() except Exception as exc: ref_e = exc try: variables = f_tst() except Exception as exc: if ref_e is None: err_msg = ( "Test %s::%s: exception when calling the " "Function" ) % (self.gpu_op, testname) exc.args += (err_msg,) raise else: # if we raised an exception of the same type we're good. if isinstance(exc, type(ref_e)): return else: err_msg = ( "Test %s::%s: exception raised during test " "call was not the same as the reference " "call (got: %s, expected %s)" % (self.gpu_op, testname, type(exc), type(ref_e)) ) exc.args += (err_msg,) raise for i, (variable, expected) in enumerate(zip(variables, expecteds)): condition = ( variable.dtype != expected.dtype or variable.shape != expected.shape or not TensorType.values_eq_approx(variable, expected) ) assert not condition, ( "Test %s::%s: Output %s gave the wrong " "value. With inputs %s, expected %s " "(dtype %s), got %s (dtype %s)." 
% ( self.op, testname, i, inputs, expected, expected.dtype, variable, variable.dtype, ) ) for description, check in self.checks.items(): assert check(inputs, variables), ( "Test %s::%s: Failed check: %s " "(inputs were %s, outputs were %s)" ) % (self.op, testname, description, inputs, variables) Checker.__name__ = name if hasattr(Checker, "__qualname__"): Checker.__qualname__ = name return Checker def test_transfer_cpu_gpu(): a = tt.fmatrix("a") g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g") av = np.asarray(rng.rand(5, 4), dtype="float32") gv = gpuarray.array(av, context=get_context(test_ctx_name)) f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av) assert GpuArrayType.values_eq(fv, gv) f = theano.function([g], host_from_gpu(g)) fv = f(gv) assert np.all(fv == av) def test_transfer_gpu_gpu(): g = GpuArrayType( dtype="float32", broadcastable=(False, False), context_name=test_ctx_name )() av = np.asarray(rng.rand(5, 4), dtype="float32") gv = gpuarray.array(av, context=get_context(test_ctx_name)) mode = mode_with_gpu.excluding( "cut_gpua_host_transfers", "local_cut_gpua_host_gpua" ) f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode) topo = f.maker.fgraph.toposort() assert len(topo) == 1 assert isinstance(topo[0].op, GpuToGpu) fv = f(gv) assert GpuArrayType.values_eq(fv, gv) def test_transfer_strided(): # This is just to ensure that it works in theano # libgpuarray has a much more comprehensive suite of tests to # ensure correctness a = tt.fmatrix("a") g = GpuArrayType(dtype="float32", broadcastable=(False, False))("g") av = np.asarray(rng.rand(5, 8), dtype="float32") gv = gpuarray.array(av, context=get_context(test_ctx_name)) av = av[:, ::2] gv = gv[:, ::2] f = theano.function([a], GpuFromHost(test_ctx_name)(a)) fv = f(av) assert GpuArrayType.values_eq(fv, gv) f = theano.function([g], host_from_gpu(g)) fv = f(gv) assert np.all(fv == av) def gpu_alloc_expected(x, *shp): g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name)) g[:] = x return g TestGpuAlloc = makeTester( name="GpuAllocTester", # The +1 is there to allow the lift to the GPU.
op=lambda *args: alloc(*args) + 1, gpu_op=GpuAlloc(test_ctx_name), cases=dict( correct01=(rand(), np.int32(7)), # just gives a DeepCopyOp with possibly wrong results on the CPU # correct01_bcast=(rand(1), np.int32(7)), correct02=(rand(), np.int32(4), np.int32(7)), correct12=(rand(7), np.int32(4), np.int32(7)), correct13=(rand(7), np.int32(2), np.int32(4), np.int32(7)), correct23=(rand(4, 7), np.int32(2), np.int32(4), np.int32(7)), bad_shape12=(rand(7), np.int32(7), np.int32(5)), ), ) class TestGPUAlloc(TestAlloc): dtype = "float32" mode = mode_with_gpu shared = staticmethod(gpuarray_shared_constructor) allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), tt.Alloc()] def test_alloc_empty(): for dt in ["float32", "int8"]: f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3)) assert len(f.maker.fgraph.apply_nodes) == 1 out = f() assert out.shape == (2, 3) assert out.dtype == dt f = theano.function( [], [ GpuAllocEmpty("uint64", test_ctx_name)(3, 2), GpuAllocEmpty("uint64", test_ctx_name)(3, 2), ], ) out = f() assert out[0].shape == (3, 2) assert out[0].dtype == "uint64" assert out[1].shape == (3, 2) assert out[1].dtype == "uint64" assert ( len( [ node for node in f.maker.fgraph.apply_nodes if isinstance(node.op, GpuAllocEmpty) ] ) == 1 ) def test_shape(): x = GpuArrayType(dtype="float32", broadcastable=[False, False, False])() v = gpuarray.zeros((3, 4, 5), dtype="float32", context=get_context(test_ctx_name)) f = theano.function([x], x.shape) topo = f.maker.fgraph.toposort() assert np.all(f(v) == (3, 4, 5)) if theano.config.mode != "FAST_COMPILE": assert len(topo) == 4 assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.Shape_i) assert isinstance(topo[3].op, tt.opt.MakeVector) mode = mode_with_gpu.excluding("local_shape_to_shape_i") f = theano.function([x], x.shape, mode=mode) topo = f.maker.fgraph.toposort() assert np.all(f(v) == (3, 4, 5)) assert len(topo) == 1 assert isinstance(topo[0].op, tt.Shape) def test_gpu_contiguous(): a = tt.fmatrix("a") i = tt.iscalar("i") a_val = np.asarray(np.random.rand(4, 5), dtype="float32") # The reshape is needed otherwise we make the subtensor on the CPU # to transfer less data. 
f = theano.function( [a, i], gpu_contiguous(a.reshape((5, 4))[::i]), mode=mode_with_gpu ) topo = f.maker.fgraph.toposort() assert any([isinstance(node.op, GpuSubtensor) for node in topo]) assert any([isinstance(node.op, GpuContiguous) for node in topo]) assert f(a_val, 1).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous assert f(a_val, 2).flags.c_contiguous class TestGPUReshape(TestReshape): def setup_method(self): self.shared = gpuarray_shared_constructor self.op = GpuReshape self.mode = mode_with_gpu self.ignore_topo = ( HostFromGpu, GpuFromHost, theano.compile.DeepCopyOp, GpuDimShuffle, GpuElemwise, tt.opt.Shape_i, tt.opt.MakeVector, ) assert self.op == GpuReshape class TestGPUComparison(TestComparison): def setup_method(self): utt.seed_rng() self.mode = mode_with_gpu self.shared = gpuarray_shared_constructor self.dtypes = ["float64", "float32"] class TestGPUJoinAndSplit(TestJoinAndSplit): def setup_method(self): self.mode = mode_with_gpu.excluding("constant_folding") self.join_op = GpuJoin() self.split_op_class = GpuSplit # Use join instead of MakeVector since there is no MakeVector on GPU self.make_vector_op = GpuJoin() # this is to avoid errors with limited devices self.floatX = "float32" self.hide_error = theano.config.mode not in ["DebugMode", "DEBUG_MODE"] def shared(x, **kwargs): return gpuarray_shared_constructor(x, target=test_ctx_name, **kwargs) self.shared = shared def test_gpusplit_opt(self): # Test that we move the node to the GPU # Also test float16 computation at the same time. rng = np.random.RandomState(seed=utt.fetch_seed()) m = self.shared(rng.rand(4, 6).astype("float16")) o = tt.Split(2)(m, 0, [2, 2]) assert o[0].dtype == "float16" f = theano.function([], o, mode=self.mode) assert any( [ isinstance(node.op, self.split_op_class) for node in f.maker.fgraph.toposort() ] ) o1, o2 = f() assert np.allclose(o1, m.get_value(borrow=True)[:2]) assert np.allclose(o2, m.get_value(borrow=True)[2:]) def test_gpujoin_gpualloc(): a = tt.fmatrix("a") a_val = np.asarray(np.random.rand(4, 5), dtype="float32") b = tt.fmatrix("b") b_val = np.asarray(np.random.rand(3, 5), dtype="float32") f = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_without_gpu ) f_gpu = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)), mode=mode_with_gpu ) f_gpu2 = theano.function( [a, b], tt.join(0, tt.zeros_like(a), tt.ones_like(b)) + 4, mode=mode_with_gpu ) assert sum([node.op == tt.alloc for node in f.maker.fgraph.toposort()]) == 2 assert sum([node.op == tt.join_ for node in f.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu.maker.fgraph.toposort()]) == 1 assert ( sum([isinstance(node.op, GpuAlloc) for node in f_gpu2.maker.fgraph.toposort()]) == 2 ) assert sum([node.op == gpu_join for node in f_gpu2.maker.fgraph.toposort()]) == 1 assert np.allclose(f(a_val, b_val), f_gpu2(a_val, b_val)) def test_gpueye(): def check(dtype, N, M_=None, k=0): # Theano does not accept None as a tensor. # So we must use a real value. M = M_ # Currently DebugMode does not support None as inputs even if this is # allowed. 
if M is None: M = N N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out = tt.eye(N_symb, M_symb, k_symb, dtype=dtype) + np.array(1).astype(dtype) f = theano.function([N_symb, M_symb, k_symb], out, mode=mode_with_gpu) result = np.asarray(f(N, M, k)) - np.array(1).astype(dtype) assert np.allclose(result, np.eye(N, M_, k, dtype=dtype)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuEye) for node in f.maker.fgraph.toposort()]) for dtype in ["float32", "int32", "float16"]: check(dtype, 3) # M != N, k = 0 check(dtype, 3, 5) check(dtype, 5, 3) # N == M, k != 0 check(dtype, 3, 3, 1) check(dtype, 3, 3, -1) # N < M, k != 0 check(dtype, 3, 5, 1) check(dtype, 3, 5, -1) # N > M, k != 0 check(dtype, 5, 3, 1) check(dtype, 5, 3, -1) # k > M, -k > N, k > M, k > N check(dtype, 5, 3, 3) check(dtype, 3, 5, 3) check(dtype, 5, 3, -3) check(dtype, 3, 5, -3) check(dtype, 5, 3, 6) check(dtype, 3, 5, -6) def test_hostfromgpu_shape_i(): # Test that the shape is lifted over hostfromgpu m = mode_with_gpu.including( "local_dot_to_dot22", "local_dot22_to_dot22scalar", "specialize" ) a = tt.fmatrix("a") ca = theano.gpuarray.type.GpuArrayType("float32", (False, False))() av = np.asarray(np.random.rand(5, 4), dtype="float32") cv = gpuarray.asarray( np.random.rand(5, 4), dtype="float32", context=get_context(test_ctx_name) ) f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m) assert any(isinstance(x.op, GpuFromHost) for x in f.maker.fgraph.toposort()) f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, tt.opt.Shape_i) assert isinstance(topo[1].op, tt.opt.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(av)) == (5, 4) f = theano.function([ca], host_from_gpu(ca), mode=m) assert host_from_gpu in [x.op for x in f.maker.fgraph.toposort()] f = theano.function([ca], host_from_gpu(ca).shape, mode=m) topo = f.maker.fgraph.toposort() assert isinstance(topo[0].op, theano.compile.Shape_i) assert isinstance(topo[1].op, theano.compile.Shape_i) assert isinstance(topo[2].op, tt.opt.MakeVector) assert tuple(f(cv)) == (5, 4) def test_Gpujoin_inplace(): # Test Gpujoin to work inplace. # # This function tests the case when several elements are passed to the # Gpujoin function but all except one of them are empty. In this case # Gpujoin should work inplace and the output should be the view of the # non-empty element. 
s = tt.lscalar() data = np.array([3, 4, 5], dtype=theano.config.floatX) x = gpuarray_shared_constructor(data, borrow=True) z = tt.zeros((s,)) join = GpuJoin(view=0) c = join(0, x, z) f = theano.function([s], theano.Out(c, borrow=True)) if not isinstance(mode_with_gpu, theano.compile.DebugMode): assert x.get_value(borrow=True, return_internal_type=True) is f(0) assert np.allclose(f(0), [3, 4, 5]) def test_gpu_tril_triu(): def check_l(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.tril(m_symb, k_symb), mode=mode_with_gpu ) result = f(m, k) assert np.allclose(result, np.tril(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) def check_u(m, k=0): m_symb = tt.matrix(dtype=m.dtype) k_symb = tt.iscalar() f = theano.function( [m_symb, k_symb], tt.triu(m_symb, k_symb), mode=mode_with_gpu ) result = f(m, k) assert np.allclose(result, np.triu(m, k)) assert result.dtype == np.dtype(dtype) assert any([isinstance(node.op, GpuTri) for node in f.maker.fgraph.toposort()]) utt.seed_rng() test_rng = np.random.RandomState(seed=utt.fetch_seed()) for dtype in ["float64", "float32", "float16"]: # try a big one m = np.asarray(test_rng.rand(5000, 5000) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 10) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) m = np.asarray(test_rng.rand(10, 5) * 2 - 1, dtype=dtype) check_l(m, 0) check_l(m, 1) check_l(m, -1) check_u(m, 0) check_u(m, 1) check_u(m, -1) def test_gputri(): def check(dtype, N, M_=None, k=0): # Theano does not accept None as a tensor. # So we must use a real value. M = M_ # Currently DebugMode does not support None as inputs even if this is # allowed. if M is None: M = N N_symb = tt.iscalar() M_symb = tt.iscalar() k_symb = tt.iscalar() out = tt.tri(N_symb, M_symb, k_symb, dtype=dtype) +
np.array(1)
numpy.array
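The completion adds a typed scalar so the graph output stays in-dtype, mirroring test_gpueye above; np.tri stands in for tt.tri here so the sketch runs without Theano.

import numpy as np

dtype = "float32"
out = np.tri(3, 5, 1, dtype=dtype) + np.array(1).astype(dtype)
result = out - np.array(1).astype(dtype)       # round-trips back to the tri matrix
print(np.allclose(result, np.tri(3, 5, 1, dtype=dtype)))  # True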
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file where the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb)
print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label = np.asarray(label) print(errors) features = np.concatenate([sentence_emb, previous_emb, next_emb,section_emb], axis=1) features = np.column_stack([features, length]) # np.append(features,length,axis=1) print(features.shape) X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42) log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1) log.fit(X_train, y_train) #save the model _ = joblib.dump(log, filename, compress=9) predictions = log.predict(X_val) print("###########################################") print("Results using embeddings from the",layer_json,"file") print(classification_report(y_val, predictions)) print("F1 score using Logistic Regression:",f1_score(y_val, predictions)) print("###########################################") #train a DNN f1_results = list() for i in range(3): model = Sequential() model.add(Dense(64, activation='relu', trainable=True)) model.add(Dense(128, activation='relu', trainable=True)) model.add(Dropout(0.30)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.25)) model.add(Dense(64, activation='relu', trainable=True)) model.add(Dropout(0.35)) model.add(Dense(1, activation='sigmoid')) # compile network model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1]) # fit network model.fit(X_train, y_train, epochs=100, batch_size=64) loss, f_1 = model.evaluate(X_val, y_val, verbose=1) print('\nTest F1: %f' % (f_1 * 100)) f1_results.append(f_1) model = None print("###########################################") print("Results using embeddings from the", layer_json, "file") # evaluate print(np.mean(f1_results)) print("###########################################") def parameter_tuning_LR(sentences_list,layer_json,dataset_csv): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) print(sentence_emb.shape) next_emb = np.asarray(next_list) print(next_emb.shape) previous_emb = np.asarray(previous_emb) print(previous_emb.shape) section_emb = np.asarray(section_list) print(sentence_emb.shape) length = np.asarray(length) print(length.shape) label =
np.asarray(label)
numpy.asarray
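The completion converts the accumulated label list to an array, the same asarray pattern applied to the embedding lists just above; the toy labels are assumed.

import numpy as np

label = [0, 1, 1, 0]        # toy stand-in for the CSV's label column
label = np.asarray(label)
print(label.shape)          # (4,)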
# -*- coding: utf-8 -*- """ Created on Thu Nov 28 12:10:11 2019 @author: Omer """ ## File handler ## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf ## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it. ## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format. import numpy as np from scipy.linalg import circulant #import matplotlib.pyplot as plt import scipy.io import common import hashlib import os projectDir = os.environ.get('LDPC') if projectDir == None: import pathlib projectDir = pathlib.Path(__file__).parent.absolute() ## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything. import sys sys.path.insert(1, projectDir) FILE_HANDLER_INT_DATA_TYPE = np.int32 GENERAL_CODE_MATRIX_DATA_TYPE = np.int32 NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) def nibbleToHex(inputArray): n = NIBBLE_CONVERTER.dot(inputArray) if n == 10: h = 'A' elif n== 11: h = 'B' elif n== 12: h = 'C' elif n== 13: h = 'D' elif n== 14: h = 'E' elif n== 15: h = 'F' else: h = str(n) return h def binaryArraytoHex(inputArray): d1 = len(inputArray) assert (d1 % 4 == 0) outputArray = np.zeros(d1//4, dtype = str) outputString = '' for j in range(d1//4): nibble = inputArray[4 * j : 4 * j + 4] h = nibbleToHex(nibble) outputArray[j] = h outputString = outputString + h return outputArray, outputString def hexStringToBinaryArray(hexString): outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) for i in hexString: if i == '0': nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '1': nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '2': nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '3': nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '4': nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '5': nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '6': nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '7': nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '8': nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '9': nibble =
np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
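The completion maps hex digit '9' to its 4-bit nibble; the file's own NIBBLE_CONVERTER dot product inverts the mapping, which confirms the value.

import numpy as np

GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)
nibble = np.array([1, 0, 0, 1], dtype=GENERAL_CODE_MATRIX_DATA_TYPE)
print(NIBBLE_CONVERTER.dot(nibble))  # 9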
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path of the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file where the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(section_emb.shape)
    length = np.asarray(length)
    print(length.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
    features = np.column_stack([features, length])  # np.append(features, length, axis=1)
    print(features.shape)

    X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
    log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
    log.fit(X_train, y_train)
    # save the model
    _ = joblib.dump(log, filename, compress=9)

    predictions = log.predict(X_val)
    print("###########################################")
    print("Results using embeddings from the", layer_json, "file")
    print(classification_report(y_val, predictions))
    print("F1 score using Logistic Regression:", f1_score(y_val, predictions))
    print("###########################################")

    # train a DNN
    f1_results = list()
    for i in range(3):
        model = Sequential()
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dense(128, activation='relu', trainable=True))
        model.add(Dropout(0.30))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.25))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.35))
        model.add(Dense(1, activation='sigmoid'))
        # compile network
        model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
        # fit network
        model.fit(X_train, y_train, epochs=100, batch_size=64)
        loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
        print('\nTest F1: %f' % (f_1 * 100))
        f1_results.append(f_1)
        model = None
    print("###########################################")
    print("Results using embeddings from the", layer_json, "file")
    # evaluate
    print(np.mean(f1_results))
    print("###########################################")


def parameter_tuning_LR(sentences_list, layer_json, dataset_csv):
    '''
    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :return:
    '''
    dataset = pd.read_csv(dataset_csv)
    bert_dict = get_embeddings(sentences_list, layer_json)

    length = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    for row in dataset.iterrows():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.append(bert_dict[sentence])
        else:
            sentence_emb.append(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.append(bert_dict[previous])
        else:
            previous_emb.append(np.zeros(768))
        if nexts in bert_dict:
            next_list.append(bert_dict[nexts])
        else:
            next_list.append(np.zeros(768))
        if section in bert_dict:
            section_list.append(bert_dict[section])
        else:
            section_list.append(np.zeros(768))
        length.append(row[1][4])
        label.append(row[1][5])

    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
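# Sanity check for the batch-wise f1 metric defined above; a minimal sketch,
# assuming a TF2/tf.keras backend where backend ops evaluate eagerly:
#
#     y_true = np.array([[1.0, 0.0, 1.0, 1.0]])
#     y_pred = np.array([[0.9, 0.2, 0.4, 0.8]])  # K.round gives [1, 0, 0, 1]
#     score = f1(K.constant(y_true), K.constant(y_pred))
#     print(float(score))  # TP=2, FP=0, FN=1 -> precision=1.0, recall=2/3, F1=0.8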
# ________
#        /
#  \    /
#   \  /
#    \/
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD

# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040

sns.set(style='darkgrid')

pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)

# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
            c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
            c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()

knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)

# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
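# Sketch (not AdvEMDpy's implementation): the sifting iteration illustrated in
# plot 0 takes the local mean as the average of envelopes through the extrema.
# A minimal stand-in with SciPy cubic splines; AdvEMDpy's B-spline envelopes
# and edge handling are more involved.
from scipy.signal import argrelextrema

_h = pseudo_alg_time_series
_max_i = argrelextrema(_h, np.greater)[0]
_min_i = argrelextrema(_h, np.less)[0]
_upper = CubicSpline(pseudo_alg_time[_max_i], _h[_max_i])(pseudo_alg_time)  # ~ envelope through M(t_i)
_lower = CubicSpline(pseudo_alg_time[_min_i], _h[_min_i])(pseudo_alg_time)  # ~ envelope through m(t_j)
_candidate_imf = _h - (_upper + _lower) / 2  # h_{(1,1)}(t) = h_{(1,0)}(t) - mean envelope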
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
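# Sketch: the alternate packages imported above (PyEMD and emd) can decompose
# the same signal for comparison; minimal usage, to the best of my reading of
# their documented APIs (commented out, as they are not exercised here):
# _imfs_pyemd = pyemd0215().emd(knot_demonstrate_time_series)
# _imfs_emd040 = emd040.sift.sift(knot_demonstrate_time_series)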
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
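# Sketch: plot 1d below demonstrates the Preprocess filters. A rough stand-in
# for a sliding-window mean filter (the class's actual boundary handling is
# not shown in this excerpt); _mean_filter is an illustrative helper name:
def _mean_filter(x, window_width=81):
    kernel = np.ones(window_width) / window_width
    return np.convolve(x, kernel, mode='same')  # zero-padded edges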
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
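# Sketch: windowed winsorization as used above clips each sample to quantiles
# of its neighbourhood. The meaning of a=0.8 here is an assumption, not taken
# from Preprocess.winsorize's source; _winsorize_window is an illustrative name:
def _winsorize_window(x, window_width=81, a=0.8):
    half = window_width // 2
    out = x.copy()
    for i in range(len(x)):
        w = x[max(0, i - half):i + half + 1]
        lo, hi = np.quantile(w, [(1 - a) / 2, (1 + a) / 2])
        out[i] = np.clip(x[i], lo, hi)
    return out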
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()

# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
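# Sketch: the cubic B-spline bases of plot 2 can be cross-checked with SciPy;
# emd_basis.Basis presumably builds comparable order-4 functions over its knot
# grid (an assumption, its source is not shown here):
from scipy.interpolate import BSpline

_b = BSpline.basis_element(knots[3:8], extrapolate=False)  # one cubic basis needs 5 consecutive knots
_b_vals = np.nan_to_num(_b(time))  # zero outside the basis support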
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) +
                              np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
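# Sketch: the two reflections contrasted in plot 3, isolated as a generic
# right-edge extension (a NumPy sketch, not the emd_utils implementation;
# _extend_right is an illustrative helper name):
def _extend_right(x, n, anti=False):
    tail = x[-2:-n - 2:-1]        # mirror of the last n interior samples
    if anti:
        tail = 2 * x[-1] - tail   # point reflection through the end value
    return np.concatenate([x, tail])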
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1)
max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1)
min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1)
min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101)
dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101)
s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1
max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1)
max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101)
dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101)
dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101)
s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2
min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1)
min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101)
dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time)
dash_4 = np.linspace(slope_based_maximum, slope_based_minimum)
maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash)
maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash)
maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash)
maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101)
maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time)
minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash)
minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash)
minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash)
minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101)
minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \
                                    time_series[time == minima_x[-1]]
improved_slope_based_maximum_time = time[-1]
improved_slope_based_maximum = time_series[-1]
improved_slope_based_minimum_time = slope_based_minimum_time
improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time -
                                                                    improved_slope_based_maximum_time)
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
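# Sketch: the slope-based extrapolation of plot 4, restated as a reusable
# helper. This repeats the s1/s2 construction used above; _slope_based_extrema
# is an illustrative name, not part of emd_utils:
def _slope_based_extrema(maxima_x, maxima_y, minima_x, minima_y):
    s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
    next_max_t = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
    next_max = minima_y[-1] + (next_max_t - minima_x[-1]) * s1
    s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
    next_min_t = minima_x[-1] + (minima_x[-1] - minima_x[-2])
    next_min = next_max - (next_max_t - next_min_t) * s2
    return (next_max_t, next_max), (next_min_t, next_min)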
# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2
A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2
P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2])
P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1])
Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1]
Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1]
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
Average_max = (maxima_y[-2] + maxima_y[-1]) / 2
Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
Average_min = (minima_y[-2] + minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave)
Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd()
Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave)
Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd()
Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd()
Huang_max_time = Huang_time[Huang_max_bool]
Huang_max = Huang_wave[Huang_max_bool]
Huang_min_time = Huang_time[Huang_min_bool]
Huang_min = Huang_wave[Huang_min_bool]
Coughlin_max_time = Coughlin_time[Coughlin_max_bool]
Coughlin_max = Coughlin_wave[Coughlin_max_bool]
Coughlin_min_time = Coughlin_time[Coughlin_min_bool]
Coughlin_min = Coughlin_wave[Coughlin_min_bool]
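# Note on the constructions above: Huang's characteristic wave rescales the
# last observed half-cycle in time (P1/P2) and amplitude (A1/A2), while
# Coughlin's replaces it with a pure cosine of amplitude A1 and period P1:
#
#     w(t) = A1 * cos(2 * pi * (t - t0) / P1)
#
# which is exactly the Coughlin_wave expression computed above.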
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.full_like(time_extended, np.nan) time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[0].set_ylim(-5.5, 5.5) axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].set_ylim(-5.5, 5.5) axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi) axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([np.pi, (3 / 2) * np.pi]) axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
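# The recurring get_position()/set_position() pattern above shrinks each axis to a
# fraction of its original width so that the legend, anchored at bbox_to_anchor=(1, 0.5),
# sits in the freed right-hand margin instead of covering the data.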
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].set_ylim(-5.5, 5.5) axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi) plt.savefig('jss_figures/DFA_different_trends_zoomed.png') plt.show() hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False) # plot 6c ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50)) x_hs, y, z = hs_outputs z_min, z_max = 0, np.abs(z).max() ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max) ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3) ax.plot(x_hs[0, :], 4 *
np.ones_like(x_hs[0, :])
numpy.ones_like
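# Distilled sketch of the single-neuron forward extrapolation trained in the code
# above: each column of P is a window of k past samples plus a bias entry, and plain
# gradient descent fits weights that map each window to the sample immediately after
# it. The helper below is illustrative; names and defaults mirror the script.
import numpy as np

def fit_forward_weights(signal, k=100, m=200, lr=0.01, iterations=1000):
    P = np.zeros((k + 1, m))
    for col in range(m):
        P[:-1, col] = signal[-(m + k - col):-(m - col)]  # k samples preceding the target
        P[-1, col] = 1  # bias row (carried along but left untrained, as in the script)
    targets = signal[-m:]
    weights = np.zeros(k)
    for _ in range(iterations):
        error = targets - weights @ P[:-1, :]
        weights += lr * np.mean(error * P[:-1, :], axis=1)  # average-gradient step
    return np.hstack((weights, 0))  # zero bias appended, matching weights_right above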
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """A LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = 
len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Check that per-split CV scores are aggregated into mean/std without the old iid weighting # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. / 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(
np.mean(test_cv_scores)
numpy.mean
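# A minimal, self-contained sketch of the aggregation asserted above (not part of the test
# suite; dataset and parameter grid are illustrative): mean_test_score is the plain unweighted
# mean of the per-split test scores, and std_test_score is their population (ddof=0) standard
# deviation. Only scikit-learn's public cv_results_ API is used.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=50, n_features=4, random_state=42)
search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=3).fit(X, y)
splits = np.array([search.cv_results_['split%d_test_score' % s][0]
                   for s in range(search.n_splits_)])
assert np.isclose(search.cv_results_['mean_test_score'][0], splits.mean())
assert np.isclose(search.cv_results_['std_test_score'][0], splits.std())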
''' ------------------------------------------------------------------------------------------------- This code accompanies the paper titled "Human injury-based safety decision of automated vehicles" Author: <NAME>, <NAME>, <NAME>, <NAME> Corresponding author: <NAME> (<EMAIL>) ------------------------------------------------------------------------------------------------- ''' import torch import numpy as np from torch import nn from torch.nn.utils import weight_norm __author__ = "<NAME>" def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param): ''' Estimate the collision condition. ''' (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle))) if -1e-6 < delta_angle_2 < 1e-6: delta_angle_2 = 1e-6 delta_v1_list = [] delta_v2_list = [] # Estimate the collision condition (delta-v) according to the principal impact direction. for veh_striking in veh_striking_list: if veh_striking[0] == 1: veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0]) veh_a2 = np.abs(veh_cgs[1] - veh_striking[3]) veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v) veh_a1 = np.abs(
np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2)
numpy.sqrt
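# For context on the completed expression above: veh_a1 is the moment arm from the striking
# vehicle's centre of gravity to the impact corner, i.e. the hypotenuse of the longitudinal
# (cgf) and lateral (cgs) CG offsets. A standalone sketch of that geometry (the numeric offsets
# below are hypothetical placeholders, not values from the paper):
import numpy as np

cgf, cgs = 1.2, 0.7                  # assumed CG offsets in metres (illustrative)
arm = np.sqrt(cgf ** 2 + cgs ** 2)   # CG-to-corner distance
angle = np.arctan(cgf / cgs)         # arm orientation, cf. veh_ca above
print(round(arm, 3), round(angle, 3))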
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi *
np.ones_like(length_distance_2)
numpy.ones_like
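# The "plot 0" section above illustrates one sifting iteration: detect extrema, fit an upper
# and a lower envelope through them, and take their pointwise mean. A minimal sketch of that
# local-mean computation using SciPy's CubicSpline (first-order finite-difference extremum
# detection, as in the Utility class; edge effects deliberately ignored here):
import numpy as np
from scipy.interpolate import CubicSpline

t = np.linspace(0, 2 * np.pi, 1001)
x = np.sin(t) + np.sin(5 * t)
dx = np.diff(x)
max_idx = np.where((dx[:-1] > 0) & (dx[1:] <= 0))[0] + 1  # interior maxima
min_idx = np.where((dx[:-1] < 0) & (dx[1:] >= 0))[0] + 1  # interior minima
lo = max(t[max_idx[0]], t[min_idx[0]])                    # region both envelopes cover
hi = min(t[max_idx[-1]], t[min_idx[-1]])
interior = (t >= lo) & (t <= hi)
upper = CubicSpline(t[max_idx], x[max_idx])(t[interior])  # maxima envelope
lower = CubicSpline(t[min_idx], x[min_idx])(t[interior])  # minima envelope
local_mean = (upper + lower) / 2                          # cf. the h-tilde-mu curve in plot 0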
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time =
np.linspace(time[-1] - width, time[-1] + width, 101)
numpy.linspace
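# The "plot 3" code above constructs reflections of the signal past its right endpoint to
# illustrate edge-effect handling. A compact sketch of the two textbook conventions (window
# length illustrative; the script's own variant, time_series_reflect[0] - time_series_reflect,
# differs from the anti-symmetric form below by a constant offset):
import numpy as np

t = np.linspace(0, (5 - 0.25) * np.pi, 1001)
x = np.cos(t) + np.cos(5 * t)
w = 100                                   # number of samples to mirror
t_ref = t[-1] + (t[-1] - t[-w:][::-1])    # time axis extended past the endpoint
x_sym = x[-w:][::-1]                      # symmetric reflection about the endpoint
x_anti = 2 * x[-1] - x_sym                # anti-symmetric (point) reflection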
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a 2D grid from its slimmed 1D representation. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned as an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2).
Examples -------- mask = np.array([[True, False, True], [False, False, False], [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size).
Examples -------- mask = np.array([[True, False, True], [False, False, False], [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask_2d=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for.
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, float) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This function operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points share the centre's y value and the x values step outwards from the centre in increments of the pixel-scale. A schematic is shown below: ------------------- | | |<- - - - ->x | x = centre | | <-> = longest radial path from centre to extent edge | | ------------------- Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax]. centre : (float, float) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent along the positive x-axis.
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = int( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = int( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_indexes_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are returned as integers such that they are the pixel from the top-left of the 2D grid going rights and then downwards. The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,). For example: The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0. The fifth pixel on the top row, whose native index is [0,5], corresponds to slimmed pixel index 4. The first pixel on the second row, whose native index is [0,1], has slimmed pixel index 10 if a row has 10 pixels. The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. The input and output grids are both of shape (total_pixels, 2). Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A grid of slimmed pixel indexes with dimensions (total_pixels,). 
    Examples
    --------
    grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim,
                                                                 shape_native=(2,2), pixel_scales=(0.5, 0.5),
                                                                 origin=(0.0, 0.0))
    """

    grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from(
        grid_scaled_2d_slim=grid_scaled_2d_slim,
        shape_native=shape_native,
        pixel_scales=pixel_scales,
        origin=origin,
    )

    grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0])

    for slim_index in range(grid_pixels_2d_slim.shape[0]):

        grid_pixel_indexes_2d_slim[slim_index] = int(
            grid_pixels_2d_slim[slim_index, 0] * shape_native[1]
            + grid_pixels_2d_slim[slim_index, 1]
        )

    return grid_pixel_indexes_2d_slim


@numba_util.jit()
def grid_scaled_2d_slim_from(
    grid_pixels_2d_slim: np.ndarray,
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values.

    The input and output grids are both slimmed and therefore shape (total_pixels, 2).

    The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the
    highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.

    The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this
    origin after computing their values from the 1D grid pixel indexes.

    Parameters
    ----------
    grid_pixels_2d_slim: np.ndarray
        The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates.
    shape_native
        The (y,x) shape of the original 2D array the scaled coordinates were computed on.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.

    Returns
    -------
    ndarray
        A slimmed grid of 2D scaled coordinates with dimensions (total_pixels, 2).

    Examples
    --------
    grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]])
    grid_scaled_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape_native=(2,2),
                                                   pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """

    grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2))

    centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
        shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
    )

    for slim_index in range(grid_scaled_2d_slim.shape[0]):

        grid_scaled_2d_slim[slim_index, 0] = (
            -(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5)
            * pixel_scales[0]
        )
        grid_scaled_2d_slim[slim_index, 1] = (
            grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5
        ) * pixel_scales[1]

    return grid_scaled_2d_slim


@numba_util.jit()
def grid_pixel_centres_2d_from(
    grid_scaled_2d: np.ndarray,
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel
    coordinates are returned as integers such that they map directly to the pixel they are contained within.

    The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2).

    The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the
    highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.
    The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this
    origin before computing their 1D grid pixel indexes.

    Parameters
    ----------
    grid_scaled_2d: np.ndarray
        The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
    shape_native
        The (y,x) shape of the original 2D array the scaled coordinates were computed on.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.

    Returns
    -------
    ndarray
        A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2).

    Examples
    --------
    grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
    grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape_native=(2,2),
                                                       pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
    """

    grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2))

    centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
        shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
    )

    for y in range(grid_scaled_2d.shape[0]):
        for x in range(grid_scaled_2d.shape[1]):
            grid_pixels_2d[y, x, 0] = int(
                (-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5
            )
            grid_pixels_2d[y, x, 1] = int(
                (grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5
            )

    return grid_pixels_2d


@numba_util.jit()
def relocated_grid_via_jit_from(grid, border_grid):
    """
    Relocate the coordinates of a grid to its border if they are outside the border, where the border is defined
    as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*).

    This is performed as follows:

    1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid.
    2: Compute the radial distance of every grid coordinate from the origin.
    3: For every coordinate, find its nearest pixel in the border.
    4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired
       border pixel's radial distance.
    5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border
       (if it is inside the border, do nothing).

    The method can be used on uniform or irregular grids, however for irregular grids the border of the
    'image-plane' mask is used to define border pixels.

    Parameters
    ----------
    grid : Grid2D
        The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it.
    border_grid : Grid2D
        The grid of border (y,x) coordinates.
    """

    grid_relocated = np.zeros(grid.shape)
    grid_relocated[:, :] = grid[:, :]

    border_origin = np.zeros(2)
    border_origin[0] = np.mean(border_grid[:, 0])
    border_origin[1] = np.mean(border_grid[:, 1])

    border_grid_radii = np.sqrt(
        np.add(
            np.square(np.subtract(border_grid[:, 0], border_origin[0])),
            np.square(np.subtract(border_grid[:, 1], border_origin[1])),
        )
    )

    border_min_radii = np.min(border_grid_radii)

    grid_radii = np.sqrt(
        np.add(
            np.square(
np.subtract(grid[:, 0], border_origin[0])
numpy.subtract
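The conversion implemented by grid_pixels_2d_slim_from above reduces to "divide by the pixel scale, add the scaled centre, add the half-pixel offset", with the y-axis flipped. Below is a minimal, dependency-free sketch of that formula, assuming origin (0.0, 0.0) so the scaled centre is simply the central pixel coordinate ((rows - 1) / 2, (cols - 1) / 2); grid_pixels_from_scaled is a hypothetical helper written for this example, not part of the library.

import numpy as np

def grid_pixels_from_scaled(grid_scaled, shape_native, pixel_scales):
    # Scaled centre for origin (0, 0): the central pixel coordinate.
    centre_y = (shape_native[0] - 1) / 2.0
    centre_x = (shape_native[1] - 1) / 2.0
    grid_pixels = np.zeros(grid_scaled.shape)
    # y is negated because pixel row 0 holds the most positive scaled y.
    grid_pixels[:, 0] = -grid_scaled[:, 0] / pixel_scales[0] + centre_y + 0.5
    grid_pixels[:, 1] = grid_scaled[:, 1] / pixel_scales[1] + centre_x + 0.5
    return grid_pixels

grid_scaled = np.array([[0.25, -0.25], [-0.25, 0.25]])
print(grid_pixels_from_scaled(grid_scaled, shape_native=(2, 2), pixel_scales=(0.5, 0.5)))
# [[0.5 0.5]    -> truncates to pixel [0, 0] (top-left)
#  [1.5 1.5]]   -> truncates to pixel [1, 1] (bottom-right)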
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=
np.zeros(10)
numpy.zeros
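The sklearn test code above exercises how **fit_params are routed through a search's fit call to the wrapped estimator, with each array-like parameter sliced alongside X for every CV split. A small runnable illustration with a real estimator (the data and parameter grid here are made up for the example, not taken from the test suite):

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV

X = np.random.RandomState(0).rand(20, 3)
y = np.array([0, 1] * 10)

search = GridSearchCV(SGDClassifier(random_state=0), {'alpha': [1e-4, 1e-2]}, cv=2)
# sample_weight has length n_samples, so it is split along with X and
# forwarded to SGDClassifier.fit on every internal fit.
search.fit(X, y, sample_weight=np.ones(20))
print(search.best_params_)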
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum


def random_spherical_vector():
    # This method of calculating isotropic vectors is taken from GNU Scientific Library
    LOOP = True
    while LOOP:
        x = -1. + 2. * np.random.uniform()
        y = -1. + 2. * np.random.uniform()
        s = x**2 + y**2
        if s <= 1.0:
            LOOP = False

    z = -1. + 2. * s
    a = 2 * np.sqrt(1 - s)
    x = a * x
    y = a * y
    return np.array([x, y, z])


class SimpleSource(object):
    """A light source that will generate photons of a single colour, direction and position."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, use_random_polarisation=False):
        super(SimpleSource, self).__init__()
        self.position = position
        self.direction = direction
        self.wavelength = wavelength
        self.use_random_polarisation = use_random_polarisation
        self.throw = 0
        self.source_id = "SimpleSource_" + str(id(self))

    def photon(self):

        photon = Photon()
        photon.source = self.source_id
        photon.position = np.array(self.position)
        photon.direction = np.array(self.direction)
        photon.active = True
        photon.wavelength = self.wavelength

        # If use_random_polarisation is set, generate a random polarisation vector for the photon
        if self.use_random_polarisation:

            # Randomise the polarisation in the xy-plane, then transform it from +z to the direction of the photon
            vec = random_spherical_vector()
            vec[2] = 0.
            vec = norm(vec)
            R = rotation_matrix_from_vector_alignment(self.direction, [0, 0, 1])
            photon.polarisation = transform_direction(vec, R)

        else:
            photon.polarisation = None

        photon.id = self.throw
        self.throw = self.throw + 1
        return photon


class Laser(object):
    """A light source that will generate photons of a single colour, direction and position."""

    def __init__(self, position=[0, 0, 0], direction=[0, 0, 1], wavelength=555, polarisation=None):
        super(Laser, self).__init__()
        self.position = np.array(position)
        self.direction = np.array(direction)
        self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
        self.polarisation = np.array(polarisation)
        self.throw = 0
        self.source_id = "LaserSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.position = np.array(self.position)
        photon.direction = np.array(self.direction)
        photon.active = True
        photon.wavelength = self.wavelength
        photon.polarisation = self.polarisation
        photon.id = self.throw
        self.throw = self.throw + 1
        return photon


class PlanarSource(object):
    """A box that emits photons from the top surface (normal), sampled from the spectrum."""

    def __init__(self, spectrum=None, wavelength=555, direction=(0, 0, 1), length=0.05, width=0.05):
        super(PlanarSource, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.plane = FinitePlane(length=length, width=width)
        self.length = length
        self.width = width
        # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME,
        # i.e. this is passed directly to the photon to set its direction
        self.direction = direction
        self.throw = 0
        self.source_id = "PlanarSource_" + str(id(self))

    def translate(self, translation):
        self.plane.append_transform(tf.translation_matrix(translation))

    def rotate(self, angle, axis):
        self.plane.append_transform(tf.rotation_matrix(angle, axis))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.id = self.throw
        self.throw = self.throw + 1

        # Create a point which is on the surface of the finite plane in its local frame
        x = np.random.uniform(0., self.length)
        y = np.random.uniform(0., self.width)
        local_point = (x, y, 0.)

        # Transform the local point into the global frame and set the direction
        photon.position = transform_point(local_point, self.plane.transform)
        photon.direction = self.direction
        photon.active = True

        if self.spectrum is not None:
            photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
        else:
            photon.wavelength = self.wavelength

        return photon


class LensSource(object):
    """
    A source where photons generated in a plane are focused on a line with space tolerance given by variable
    "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    """

    def __init__(self, spectrum=None, wavelength=555, linepoint=(0, 0, 0), linedirection=(0, 0, 1), focussize=0, planeorigin=(-1, -1, -1), planeextent=(-1, 1, 1)):
        super(LensSource, self).__init__()
        self.spectrum = spectrum
        self.wavelength = wavelength
        self.planeorigin = planeorigin
        self.planeextent = planeextent
        self.linepoint = np.array(linepoint)
        self.linedirection = np.array(linedirection)
        self.focussize = focussize
        self.throw = 0
        self.source_id = "LensSource_" + str(id(self))

    def photon(self):
        photon = Photon()
        photon.source = self.source_id
        photon.id = self.throw
        self.throw = self.throw + 1

        # Position
        x = np.random.uniform(self.planeorigin[0], self.planeextent[0])
        y = np.random.uniform(self.planeorigin[1], self.planeextent[1])
        z = np.random.uniform(self.planeorigin[2], self.planeextent[2])
        photon.position =
np.array((x,y,z))
numpy.array
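For reference, the rejection scheme used by random_spherical_vector above is Marsaglia's method: draw (x, y) uniformly inside the unit disc, then lift the point onto the sphere. A self-contained check (illustrative only, not part of pvtrace) that the resulting vectors are exactly unit length:

import numpy as np

def marsaglia_direction(rng):
    # Rejection-sample (x, y) inside the unit disc.
    while True:
        x, y = -1.0 + 2.0 * rng.uniform(size=2)
        s = x * x + y * y
        if s <= 1.0:
            break
    a = 2.0 * np.sqrt(1.0 - s)
    # |(a*x, a*y, 2s - 1)|^2 = 4s(1 - s) + (1 - 2s)^2 = 1 exactly.
    return np.array([a * x, a * y, -1.0 + 2.0 * s])

rng = np.random.RandomState(0)
print(np.linalg.norm(marsaglia_direction(rng)))  # 1.0 up to float rounding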
import os import string from collections import Counter from datetime import datetime from functools import partial from pathlib import Path from typing import Optional import numpy as np import pandas as pd from scipy.stats.stats import chisquare from tangled_up_in_unicode import block, block_abbr, category, category_long, script from pandas_profiling.config import Settings from pandas_profiling.model.summary_helpers_image import ( extract_exif, hash_image, is_image_truncated, open_image, ) def mad(arr: np.ndarray) -> np.ndarray: """Median Absolute Deviation: a "Robust" version of standard deviation. Indices variability of the sample. https://en.wikipedia.org/wiki/Median_absolute_deviation """ return np.median(np.abs(arr - np.median(arr))) def named_aggregate_summary(series: pd.Series, key: str) -> dict: summary = { f"max_{key}": np.max(series), f"mean_{key}": np.mean(series), f"median_{key}": np.median(series), f"min_{key}": np.min(series), } return summary def length_summary(series: pd.Series, summary: dict = None) -> dict: if summary is None: summary = {} length = series.str.len() summary.update({"length": length}) summary.update(named_aggregate_summary(length, "length")) return summary def file_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ # Transform stats = series.map(lambda x: os.stat(x)) def convert_datetime(x: float) -> str: return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S") # Transform some more summary = { "file_size": stats.map(lambda x: x.st_size), "file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime), "file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime), "file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime), } return summary def path_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ # TODO: optimize using value counts summary = { "common_prefix": os.path.commonprefix(series.values.tolist()) or "No common prefix", "stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(), "suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(), "name_counts": series.map(lambda x: os.path.basename(x)).value_counts(), "parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(), "anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(), } summary["n_stem_unique"] = len(summary["stem_counts"]) summary["n_suffix_unique"] = len(summary["suffix_counts"]) summary["n_name_unique"] = len(summary["name_counts"]) summary["n_parent_unique"] = len(summary["parent_counts"]) summary["n_anchor_unique"] = len(summary["anchor_counts"]) return summary def url_summary(series: pd.Series) -> dict: """ Args: series: series to summarize Returns: """ summary = { "scheme_counts": series.map(lambda x: x.scheme).value_counts(), "netloc_counts": series.map(lambda x: x.netloc).value_counts(), "path_counts": series.map(lambda x: x.path).value_counts(), "query_counts": series.map(lambda x: x.query).value_counts(), "fragment_counts": series.map(lambda x: x.fragment).value_counts(), } return summary def count_duplicate_hashes(image_descriptions: dict) -> int: """ Args: image_descriptions: Returns: """ counts = pd.Series( [x["hash"] for x in image_descriptions if "hash" in x] ).value_counts() return counts.sum() - len(counts) def extract_exif_series(image_exifs: list) -> dict: """ Args: image_exifs: Returns: """ exif_keys = [] exif_values: dict = {} for image_exif in image_exifs: # Extract key 
exif_keys.extend(list(image_exif.keys())) # Extract values per key for exif_key, exif_val in image_exif.items(): if exif_key not in exif_values: exif_values[exif_key] = [] exif_values[exif_key].append(exif_val) series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()} for k, v in exif_values.items(): series[k] = pd.Series(v).value_counts() return series def extract_image_information( path: Path, exif: bool = False, hash: bool = False ) -> dict: """Extracts all image information per file, as opening files is slow Args: path: Path to the image exif: extract exif information hash: calculate hash (for duplicate detection) Returns: A dict containing image information """ information: dict = {} image = open_image(path) information["opened"] = image is not None if image is not None: information["truncated"] = is_image_truncated(image) if not information["truncated"]: information["size"] = image.size if exif: information["exif"] = extract_exif(image) if hash: information["hash"] = hash_image(image) return information def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict: """ Args: series: series to summarize exif: extract exif information hash: calculate hash (for duplicate detection) Returns: """ image_information = series.apply( partial(extract_image_information, exif=exif, hash=hash) ) summary = { "n_truncated": sum( [1 for x in image_information if "truncated" in x and x["truncated"]] ), "image_dimensions": pd.Series( [x["size"] for x in image_information if "size" in x], name="image_dimensions", ), } image_widths = summary["image_dimensions"].map(lambda x: x[0]) summary.update(named_aggregate_summary(image_widths, "width")) image_heights = summary["image_dimensions"].map(lambda x: x[1]) summary.update(named_aggregate_summary(image_heights, "height")) image_areas = image_widths * image_heights summary.update(named_aggregate_summary(image_areas, "area")) if hash: summary["n_duplicate_hash"] = count_duplicate_hashes(image_information) if exif: exif_series = extract_exif_series( [x["exif"] for x in image_information if "exif" in x] ) summary["exif_keys_counts"] = exif_series["exif_keys"] summary["exif_data"] = exif_series return summary def get_character_counts(series: pd.Series) -> Counter: """Function to return the character counts Args: series: the Series to process Returns: A dict with character counts """ return Counter(series.str.cat()) def counter_to_series(counter: Counter) -> pd.Series: if not counter: return pd.Series([], dtype=object) counter_as_tuples = counter.most_common() items, counts = zip(*counter_as_tuples) return pd.Series(counts, index=items) def unicode_summary(series: pd.Series) -> dict: # Unicode Character Summaries (category and script name) character_counts = get_character_counts(series) character_counts_series = counter_to_series(character_counts) char_to_block = {key: block(key) for key in character_counts.keys()} char_to_category_short = {key: category(key) for key in character_counts.keys()} char_to_script = {key: script(key) for key in character_counts.keys()} summary = { "n_characters": len(character_counts_series), "character_counts": character_counts_series, "category_alias_values": { key: category_long(value) for key, value in char_to_category_short.items() }, "block_alias_values": { key: block_abbr(value) for key, value in char_to_block.items() }, } # Retrieve original distribution block_alias_counts: Counter = Counter() per_block_char_counts: dict = { k: Counter() for k in 
summary["block_alias_values"].values() } for char, n_char in character_counts.items(): block_name = summary["block_alias_values"][char] block_alias_counts[block_name] += n_char per_block_char_counts[block_name][char] = n_char summary["block_alias_counts"] = counter_to_series(block_alias_counts) summary["block_alias_char_counts"] = { k: counter_to_series(v) for k, v in per_block_char_counts.items() } script_counts: Counter = Counter() per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()} for char, n_char in character_counts.items(): script_name = char_to_script[char] script_counts[script_name] += n_char per_script_char_counts[script_name][char] = n_char summary["script_counts"] = counter_to_series(script_counts) summary["script_char_counts"] = { k: counter_to_series(v) for k, v in per_script_char_counts.items() } category_alias_counts: Counter = Counter() per_category_alias_char_counts: dict = { k: Counter() for k in summary["category_alias_values"].values() } for char, n_char in character_counts.items(): category_alias_name = summary["category_alias_values"][char] category_alias_counts[category_alias_name] += n_char per_category_alias_char_counts[category_alias_name][char] += n_char summary["category_alias_counts"] = counter_to_series(category_alias_counts) summary["category_alias_char_counts"] = { k: counter_to_series(v) for k, v in per_category_alias_char_counts.items() } # Unique counts summary["n_category"] = len(summary["category_alias_counts"]) summary["n_scripts"] = len(summary["script_counts"]) summary["n_block_alias"] = len(summary["block_alias_counts"]) if len(summary["category_alias_counts"]) > 0: summary["category_alias_counts"].index = summary[ "category_alias_counts" ].index.str.replace("_", " ") return summary def histogram_compute( config: Settings, finite_values: np.ndarray, n_unique: int, name: str = "histogram", weights: Optional[np.ndarray] = None, ) -> dict: stats = {} bins = config.plot.histogram.bins bins_arg = "auto" if bins == 0 else min(bins, n_unique) stats[name] =
np.histogram(finite_values, bins=bins_arg, weights=weights)
numpy.histogram
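histogram_compute above maps a configured bin count of zero to numpy's "auto" bin estimator and otherwise caps the count at the number of unique values. A short illustration of that rule with plain numpy (the values and bins settings are made up for the example):

import numpy as np

values = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 10.0])
n_unique = len(np.unique(values))  # 4

for bins in (0, 50):
    bins_arg = "auto" if bins == 0 else min(bins, n_unique)
    counts, bin_edges = np.histogram(values, bins=bins_arg)
    print(bins_arg, counts.sum())  # every value falls in some bin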
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=
np.ones(10)
numpy.ones
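The expected-error assertions in these search tests pass the message to pytest.raises as a regular expression, which is why characters like parentheses and brackets are escaped. A tiny standalone illustration of that matching behaviour (the fit helper below is hypothetical, written only for this example):

import pytest

def fit(**params):
    missing = sorted({'eggs', 'spam'} - params.keys())
    if missing:
        raise AssertionError("Expected fit parameter(s) %s not seen." % missing)

with pytest.raises(AssertionError, match=r"Expected fit parameter\(s\) \['eggs'\] not seen."):
    fit(spam=1)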
""" YTArray class. """ from __future__ import print_function #----------------------------------------------------------------------------- # Copyright (c) 2013, yt Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import copy import numpy as np from distutils.version import LooseVersion from functools import wraps from numpy import \ add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, \ floor_divide, negative, power, remainder, mod, absolute, rint, \ sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, \ reciprocal, sin, cos, tan, arcsin, arccos, arctan, arctan2, \ hypot, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, \ bitwise_and, bitwise_or, bitwise_xor, invert, left_shift, right_shift, \ greater, greater_equal, less, less_equal, not_equal, equal, logical_and, \ logical_or, logical_xor, logical_not, maximum, minimum, fmax, fmin, \ isreal, iscomplex, isfinite, isinf, isnan, signbit, copysign, nextafter, \ modf, ldexp, frexp, fmod, floor, ceil, trunc, fabs, spacing try: # numpy 1.13 or newer from numpy import positive, divmod as divmod_, isnat, heaviside except ImportError: positive, divmod_, isnat, heaviside = (None,)*4 from yt.units.unit_object import Unit, UnitParseError from yt.units.unit_registry import UnitRegistry from yt.units.dimensions import \ angle, \ current_mks, \ dimensionless, \ em_dimensions from yt.utilities.exceptions import \ YTUnitOperationError, YTUnitConversionError, \ YTUfuncUnitError, YTIterableUnitCoercionError, \ YTInvalidUnitEquivalence, YTEquivalentDimsError from yt.utilities.lru_cache import lru_cache from numbers import Number as numeric_type from yt.utilities.on_demand_imports import _astropy from sympy import Rational from yt.units.unit_lookup_table import \ default_unit_symbol_lut from yt.units.equivalencies import equivalence_registry from yt.utilities.logger import ytLogger as mylog from .pint_conversions import convert_pint_units NULL_UNIT = Unit() POWER_SIGN_MAPPING = {multiply: 1, divide: -1} # redefine this here to avoid a circular import from yt.funcs def iterable(obj): try: len(obj) except: return False return True def return_arr(func): @wraps(func) def wrapped(*args, **kwargs): ret, units = func(*args, **kwargs) if ret.shape == (): return YTQuantity(ret, units) else: # This could be a subclass, so don't call YTArray directly. 
return type(args[0])(ret, units) return wrapped @lru_cache(maxsize=128, typed=False) def sqrt_unit(unit): return unit**0.5 @lru_cache(maxsize=128, typed=False) def multiply_units(unit1, unit2): return unit1 * unit2 def preserve_units(unit1, unit2=None): return unit1 @lru_cache(maxsize=128, typed=False) def power_unit(unit, power): return unit**power @lru_cache(maxsize=128, typed=False) def square_unit(unit): return unit*unit @lru_cache(maxsize=128, typed=False) def divide_units(unit1, unit2): return unit1/unit2 @lru_cache(maxsize=128, typed=False) def reciprocal_unit(unit): return unit**-1 def passthrough_unit(unit, unit2=None): return unit def return_without_unit(unit, unit2=None): return None def arctan2_unit(unit1, unit2): return NULL_UNIT def comparison_unit(unit1, unit2=None): return None def invert_units(unit): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def bitop_units(unit1, unit2): raise TypeError( "Bit-twiddling operators are not defined for YTArray instances") def get_inp_u_unary(ufunc, inputs, out_arr=None): inp = inputs[0] u = getattr(inp, 'units', None) if u is None: u = NULL_UNIT if u.dimensions is angle and ufunc in trigonometric_operators: inp = inp.in_units('radian').v if out_arr is not None: out_arr = ufunc(inp).view(np.ndarray) return out_arr, inp, u def get_inp_u_binary(ufunc, inputs): inp1 = coerce_iterable_units(inputs[0]) inp2 = coerce_iterable_units(inputs[1]) unit1 = getattr(inp1, 'units', None) unit2 = getattr(inp2, 'units', None) ret_class = get_binary_op_return_class(type(inp1), type(inp2)) if unit1 is None: unit1 = Unit(registry=getattr(unit2, 'registry', None)) if unit2 is None and ufunc is not power: unit2 = Unit(registry=getattr(unit1, 'registry', None)) elif ufunc is power: unit2 = inp2 if isinstance(unit2, np.ndarray): if isinstance(unit2, YTArray): if unit2.units.is_dimensionless: pass else: raise YTUnitOperationError(ufunc, unit1, unit2) unit2 = 1.0 return (inp1, inp2), (unit1, unit2), ret_class def handle_preserve_units(inps, units, ufunc, ret_class): if units[0] != units[1]: any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) else: if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_comparison_units(inps, units, ufunc, ret_class, raise_error=False): if units[0] != units[1]: u1d = units[0].is_dimensionless u2d = units[1].is_dimensionless any_nonzero = [np.any(inps[0]), np.any(inps[1])] if any_nonzero[0] == np.bool_(False): units = (units[1], units[1]) elif any_nonzero[1] == np.bool_(False): units = (units[0], units[0]) elif not any([u1d, u2d]): if not units[0].same_dimensions_as(units[1]): raise YTUnitOperationError(ufunc, *units) else: if raise_error: raise YTUfuncUnitError(ufunc, *units) inps = (inps[0], ret_class(inps[1]).to( ret_class(inps[0]).units)) return inps, units def handle_multiply_divide_units(unit, units, out, out_arr): if unit.is_dimensionless and unit.base_value != 1.0: if not units[0].is_dimensionless: if units[0].dimensions == units[1].dimensions: out_arr = np.multiply(out_arr.view(np.ndarray), unit.base_value, out=out) unit = Unit(registry=unit.registry) return out, out_arr, unit def coerce_iterable_units(input_object): if isinstance(input_object, np.ndarray): return input_object if iterable(input_object): if any([isinstance(o, 
YTArray) for o in input_object]): ff = getattr(input_object[0], 'units', NULL_UNIT, ) if any([ff != getattr(_, 'units', NULL_UNIT) for _ in input_object]): raise YTIterableUnitCoercionError(input_object) # This will create a copy of the data in the iterable. return YTArray(input_object) return input_object else: return input_object def sanitize_units_mul(this_object, other_object): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # If the other object is a YTArray and has the same dimensions as the object # under consideration, convert so we don't mix units with the same # dimensions. if isinstance(ret, YTArray): if inp.units.same_dimensions_as(ret.units): ret.in_units(inp.units) return ret def sanitize_units_add(this_object, other_object, op_string): inp = coerce_iterable_units(this_object) ret = coerce_iterable_units(other_object) # Make sure the other object is a YTArray before we use the `units` # attribute. if isinstance(ret, YTArray): if not inp.units.same_dimensions_as(ret.units): # handle special case of adding or subtracting with zero or # array filled with zero if not np.any(other_object): return ret.view(np.ndarray) elif not np.any(this_object): return ret raise YTUnitOperationError(op_string, inp.units, ret.units) ret = ret.in_units(inp.units) else: # If the other object is not a YTArray, then one of the arrays must be # dimensionless or filled with zeros if not inp.units.is_dimensionless and np.any(ret): raise YTUnitOperationError(op_string, inp.units, dimensionless) return ret def validate_comparison_units(this, other, op_string): # Check that other is a YTArray. if hasattr(other, 'units'): if this.units.expr is other.units.expr: if this.units.base_value == other.units.base_value: return other if not this.units.same_dimensions_as(other.units): raise YTUnitOperationError(op_string, this.units, other.units) return other.in_units(this.units) return other @lru_cache(maxsize=128, typed=False) def _unit_repr_check_same(my_units, other_units): """ Takes a Unit object, or string of known unit symbol, and check that it is compatible with this quantity. Returns Unit object. """ # let Unit() handle units arg if it's not already a Unit obj. 
if not isinstance(other_units, Unit): other_units = Unit(other_units, registry=my_units.registry) equiv_dims = em_dimensions.get(my_units.dimensions, None) if equiv_dims == other_units.dimensions: if current_mks in equiv_dims.free_symbols: base = "SI" else: base = "CGS" raise YTEquivalentDimsError(my_units, other_units, base) if not my_units.same_dimensions_as(other_units): raise YTUnitConversionError( my_units, my_units.dimensions, other_units, other_units.dimensions) return other_units unary_operators = ( negative, absolute, rint, sign, conj, exp, exp2, log, log2, log10, expm1, log1p, sqrt, square, reciprocal, sin, cos, tan, arcsin, arccos, arctan, sinh, cosh, tanh, arcsinh, arccosh, arctanh, deg2rad, rad2deg, invert, logical_not, isreal, iscomplex, isfinite, isinf, isnan, signbit, floor, ceil, trunc, modf, frexp, fabs, spacing, positive, isnat, ) binary_operators = ( add, subtract, multiply, divide, logaddexp, logaddexp2, true_divide, power, remainder, mod, arctan2, hypot, bitwise_and, bitwise_or, bitwise_xor, left_shift, right_shift, greater, greater_equal, less, less_equal, not_equal, equal, logical_and, logical_or, logical_xor, maximum, minimum, fmax, fmin, copysign, nextafter, ldexp, fmod, divmod_, heaviside ) trigonometric_operators = ( sin, cos, tan, ) class YTArray(np.ndarray): """ An ndarray subclass that attaches a symbolic unit object to the array data. Parameters ---------- input_array : :obj:`!iterable` A tuple, list, or array to attach units to input_units : String unit specification, unit symbol object, or astropy units The units of the array. Powers must be specified using python syntax (cm**3, not cm^3). registry : ~yt.units.unit_registry.UnitRegistry The registry to create units from. If input_units is already associated with a unit registry and this is specified, this will be used instead of the registry associated with the unit object. dtype : data-type The dtype of the array data. Defaults to the dtype of the input data, or, if none is found, uses np.float64 bypass_validation : boolean If True, all input validation is skipped. Using this option may produce corrupted, invalid units or array data, but can lead to significant speedups in the input validation logic adds significant overhead. If set, input_units *must* be a valid unit object. Defaults to False. Examples -------- >>> from yt import YTArray >>> a = YTArray([1, 2, 3], 'cm') >>> b = YTArray([4, 5, 6], 'm') >>> a + b YTArray([ 401., 502., 603.]) cm >>> b + a YTArray([ 4.01, 5.02, 6.03]) m NumPy ufuncs will pass through units where appropriate. >>> import numpy as np >>> a = YTArray(np.arange(8) - 4, 'g/cm**3') >>> np.abs(a) YTArray([4, 3, 2, 1, 0, 1, 2, 3]) g/cm**3 and strip them when it would be annoying to deal with them. >>> np.log10(a) array([ -inf, 0. 
, 0.30103 , 0.47712125, 0.60205999, 0.69897 , 0.77815125, 0.84509804]) YTArray is tightly integrated with yt datasets: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> a = ds.arr(np.ones(5), 'code_length') >>> a.in_cgs() YTArray([ 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24, 3.08600000e+24]) cm This is equivalent to: >>> b = YTArray(np.ones(5), 'code_length', registry=ds.unit_registry) >>> np.all(a == b) True """ _ufunc_registry = { add: preserve_units, subtract: preserve_units, multiply: multiply_units, divide: divide_units, logaddexp: return_without_unit, logaddexp2: return_without_unit, true_divide: divide_units, floor_divide: divide_units, negative: passthrough_unit, power: power_unit, remainder: preserve_units, mod: preserve_units, fmod: preserve_units, absolute: passthrough_unit, fabs: passthrough_unit, rint: return_without_unit, sign: return_without_unit, conj: passthrough_unit, exp: return_without_unit, exp2: return_without_unit, log: return_without_unit, log2: return_without_unit, log10: return_without_unit, expm1: return_without_unit, log1p: return_without_unit, sqrt: sqrt_unit, square: square_unit, reciprocal: reciprocal_unit, sin: return_without_unit, cos: return_without_unit, tan: return_without_unit, sinh: return_without_unit, cosh: return_without_unit, tanh: return_without_unit, arcsin: return_without_unit, arccos: return_without_unit, arctan: return_without_unit, arctan2: arctan2_unit, arcsinh: return_without_unit, arccosh: return_without_unit, arctanh: return_without_unit, hypot: preserve_units, deg2rad: return_without_unit, rad2deg: return_without_unit, bitwise_and: bitop_units, bitwise_or: bitop_units, bitwise_xor: bitop_units, invert: invert_units, left_shift: bitop_units, right_shift: bitop_units, greater: comparison_unit, greater_equal: comparison_unit, less: comparison_unit, less_equal: comparison_unit, not_equal: comparison_unit, equal: comparison_unit, logical_and: comparison_unit, logical_or: comparison_unit, logical_xor: comparison_unit, logical_not: return_without_unit, maximum: preserve_units, minimum: preserve_units, fmax: preserve_units, fmin: preserve_units, isreal: return_without_unit, iscomplex: return_without_unit, isfinite: return_without_unit, isinf: return_without_unit, isnan: return_without_unit, signbit: return_without_unit, copysign: passthrough_unit, nextafter: preserve_units, modf: passthrough_unit, ldexp: bitop_units, frexp: return_without_unit, floor: passthrough_unit, ceil: passthrough_unit, trunc: passthrough_unit, spacing: passthrough_unit, positive: passthrough_unit, divmod_: passthrough_unit, isnat: return_without_unit, heaviside: preserve_units, } __array_priority__ = 2.0 def __new__(cls, input_array, input_units=None, registry=None, dtype=None, bypass_validation=False): if dtype is None: dtype = getattr(input_array, 'dtype', np.float64) if bypass_validation is True: obj = np.asarray(input_array, dtype=dtype).view(cls) obj.units = input_units if registry is not None: obj.units.registry = registry return obj if input_array is NotImplemented: return input_array.view(cls) if registry is None and isinstance(input_units, (str, bytes)): if input_units.startswith('code_'): raise UnitParseError( "Code units used without referring to a dataset. 
\n" "Perhaps you meant to do something like this instead: \n" "ds.arr(%s, \"%s\")" % (input_array, input_units) ) if isinstance(input_array, YTArray): ret = input_array.view(cls) if input_units is None: if registry is None: ret.units = input_array.units else: units = Unit(str(input_array.units), registry=registry) ret.units = units elif isinstance(input_units, Unit): ret.units = input_units else: ret.units = Unit(input_units, registry=registry) return ret elif isinstance(input_array, np.ndarray): pass elif iterable(input_array) and input_array: if isinstance(input_array[0], YTArray): return YTArray(np.array(input_array, dtype=dtype), input_array[0].units, registry=registry) # Input array is an already formed ndarray instance # We first cast to be our class type obj = np.asarray(input_array, dtype=dtype).view(cls) # Check units type if input_units is None: # Nothing provided. Make dimensionless... units = Unit() elif isinstance(input_units, Unit): if registry and registry is not input_units.registry: units = Unit(str(input_units), registry=registry) else: units = input_units else: # units kwarg set, but it's not a Unit object. # don't handle all the cases here, let the Unit class handle if # it's a str. units = Unit(input_units, registry=registry) # Attach the units obj.units = units return obj def __repr__(self): """ """ return super(YTArray, self).__repr__()+' '+self.units.__repr__() def __str__(self): """ """ return str(self.view(np.ndarray)) + ' ' + str(self.units) # # Start unit conversion methods # def convert_to_units(self, units): """ Convert the array and units to the given units. Parameters ---------- units : Unit object or str The units you want to convert to. """ new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) self.units = new_units values = self.d values *= conversion_factor if offset: np.subtract(self, offset*self.uq, self) return self def convert_to_base(self, unit_system="cgs"): """ Convert the array and units to the equivalent base units in the specified unit system. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E.convert_to_base(unit_system="galactic") """ return self.convert_to_units(self.units.get_base_equivalent(unit_system)) def convert_to_cgs(self): """ Convert the array and units to the equivalent cgs units. """ return self.convert_to_units(self.units.get_cgs_equivalent()) def convert_to_mks(self): """ Convert the array and units to the equivalent mks units. """ return self.convert_to_units(self.units.get_mks_equivalent()) def in_units(self, units, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string The units you want to get a new quantity in. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Default: None Returns ------- YTArray """ if equivalence is None: new_units = _unit_repr_check_same(self.units, units) (conversion_factor, offset) = self.units.get_conversion_factor(new_units) new_array = type(self)(self.ndview * conversion_factor, new_units) if offset: np.subtract(new_array, offset*new_array.uq, new_array) return new_array else: return self.to_equivalent(units, equivalence, **kwargs) def to(self, units, equivalence=None, **kwargs): """ An alias for YTArray.in_units(). See the docstrings of that function for details. """ return self.in_units(units, equivalence=equivalence, **kwargs) def to_value(self, units=None, equivalence=None, **kwargs): """ Creates a copy of this array with the data in the supplied units, and returns it without units. Output is therefore a bare NumPy array. Optionally, an equivalence can be specified to convert to an equivalent quantity which is not in the same dimensions. .. note:: All additional keyword arguments are passed to the equivalency, which should be used if that particular equivalency requires them. Parameters ---------- units : Unit object or string, optional The units you want to get the bare quantity in. If not specified, the value will be returned in the current units. equivalence : string, optional The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. Default: None Returns ------- NumPy array """ if units is None: v = self.value else: v = self.in_units(units, equivalence=equivalence, **kwargs).value if isinstance(self, YTQuantity): return float(v) else: return v def in_base(self, unit_system="cgs"): """ Creates a copy of this array with the data in the specified unit system, and returns it in that system's base units. Parameters ---------- unit_system : string, optional The unit system to be used in the conversion. If not specified, the default base units of cgs are used. Examples -------- >>> E = YTQuantity(2.5, "erg/s") >>> E_new = E.in_base(unit_system="galactic") """ return self.in_units(self.units.get_base_equivalent(unit_system)) def in_cgs(self): """ Creates a copy of this array with the data in the equivalent cgs units, and returns it. Returns ------- Quantity object with data converted to cgs units. """ return self.in_units(self.units.get_cgs_equivalent()) def in_mks(self): """ Creates a copy of this array with the data in the equivalent mks units, and returns it. Returns ------- Quantity object with data converted to mks units. """ return self.in_units(self.units.get_mks_equivalent()) def to_equivalent(self, unit, equiv, **kwargs): """ Convert a YTArray or YTQuantity to an equivalent, e.g., something that is related by only a constant factor but not in the same units. Parameters ---------- unit : string The unit that you wish to convert to. equiv : string The equivalence you wish to use. To see which equivalencies are supported for this unitful quantity, try the :meth:`list_equivalencies` method. 
Examples -------- >>> a = yt.YTArray(1.0e7,"K") >>> a.to_equivalent("keV", "thermal") """ conv_unit = Unit(unit, registry=self.units.registry) if self.units.same_dimensions_as(conv_unit): return self.in_units(conv_unit) this_equiv = equivalence_registry[equiv]() oneway_or_equivalent = ( conv_unit.has_equivalent(equiv) or this_equiv._one_way) if self.has_equivalent(equiv) and oneway_or_equivalent: new_arr = this_equiv.convert( self, conv_unit.dimensions, **kwargs) if isinstance(new_arr, tuple): try: return type(self)(new_arr[0], new_arr[1]).in_units(unit) except YTUnitConversionError: raise YTInvalidUnitEquivalence(equiv, self.units, unit) else: return new_arr.in_units(unit) else: raise YTInvalidUnitEquivalence(equiv, self.units, unit) def list_equivalencies(self): """ Lists the possible equivalencies associated with this YTArray or YTQuantity. """ self.units.list_equivalencies() def has_equivalent(self, equiv): """ Check to see if this YTArray or YTQuantity has an equivalent unit in *equiv*. """ return self.units.has_equivalent(equiv) def ndarray_view(self): """ Returns a view into the array, but as an ndarray rather than ytarray. Returns ------- View of this array's data. """ return self.view(np.ndarray) def to_ndarray(self): """ Creates a copy of this array with the unit information stripped """ return np.array(self) @classmethod def from_astropy(cls, arr, unit_registry=None): """ Convert an AstroPy "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : AstroPy Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. """ # Converting from AstroPy Quantity u = arr.unit ap_units = [] for base, exponent in zip(u.bases, u.powers): unit_str = base.to_string() # we have to do this because AstroPy is silly and defines # hour as "h" if unit_str == "h": unit_str = "hr" ap_units.append("%s**(%s)" % (unit_str, Rational(exponent))) ap_units = "*".join(ap_units) if isinstance(arr.value, np.ndarray): return YTArray(arr.value, ap_units, registry=unit_registry) else: return YTQuantity(arr.value, ap_units, registry=unit_registry) def to_astropy(self, **kwargs): """ Creates a new AstroPy quantity with the same unit information. """ if _astropy.units is None: raise ImportError("You don't have AstroPy installed, so you can't convert to " + "an AstroPy quantity.") return self.value*_astropy.units.Unit(str(self.units), **kwargs) @classmethod def from_pint(cls, arr, unit_registry=None): """ Convert a Pint "Quantity" to a YTArray or YTQuantity. Parameters ---------- arr : Pint Quantity The Quantity to convert from. unit_registry : yt UnitRegistry, optional A yt unit registry to use in the conversion. If one is not supplied, the default one will be used. Examples -------- >>> from pint import UnitRegistry >>> import numpy as np >>> ureg = UnitRegistry() >>> a = np.random.random(10) >>> b = ureg.Quantity(a, "erg/cm**3") >>> c = yt.YTArray.from_pint(b) """ p_units = [] for base, exponent in arr._units.items(): bs = convert_pint_units(base) p_units.append("%s**(%s)" % (bs, Rational(exponent))) p_units = "*".join(p_units) if isinstance(arr.magnitude, np.ndarray): return YTArray(arr.magnitude, p_units, registry=unit_registry) else: return YTQuantity(arr.magnitude, p_units, registry=unit_registry) def to_pint(self, unit_registry=None): """ Convert a YTArray or YTQuantity to a Pint Quantity. Parameters ---------- arr : YTArray or YTQuantity The unitful quantity to convert from. 
unit_registry : Pint UnitRegistry, optional The Pint UnitRegistry to use in the conversion. If one is not supplied, the default one will be used. NOTE: This is not the same as a yt UnitRegistry object. Examples -------- >>> a = YTQuantity(4.0, "cm**2/s") >>> b = a.to_pint() """ from pint import UnitRegistry if unit_registry is None: unit_registry = UnitRegistry() powers_dict = self.units.expr.as_powers_dict() units = [] for unit, pow in powers_dict.items(): # we have to do this because Pint doesn't recognize # "yr" as "year" if str(unit).endswith("yr") and len(str(unit)) in [2,3]: unit = str(unit).replace("yr","year") units.append("%s**(%s)" % (unit, Rational(pow))) units = "*".join(units) return unit_registry.Quantity(self.value, units) # # End unit conversion methods # def write_hdf5(self, filename, dataset_name=None, info=None, group_name=None): r"""Writes a YTArray to hdf5 file. Parameters ---------- filename: string The filename to create and write a dataset to dataset_name: string The name of the dataset to create in the file. info: dictionary A dictionary of supplementary info to write to append as attributes to the dataset. group_name: string An optional group to write the arrays to. If not specified, the arrays are datasets at the top level by default. Examples -------- >>> a = YTArray([1,2,3], 'cm') >>> myinfo = {'field':'dinosaurs', 'type':'field_data'} >>> a.write_hdf5('test_array_data.h5', dataset_name='dinosaurs', ... info=myinfo) """ from yt.utilities.on_demand_imports import _h5py as h5py from yt.extern.six.moves import cPickle as pickle if info is None: info = {} info['units'] = str(self.units) info['unit_registry'] = np.void(pickle.dumps(self.units.registry.lut)) if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: if group_name in f: g = f[group_name] else: g = f.create_group(group_name) else: g = f if dataset_name in g.keys(): d = g[dataset_name] # Overwrite without deleting if we can get away with it. if d.shape == self.shape and d.dtype == self.dtype: d[...] = self for k in d.attrs.keys(): del d.attrs[k] else: del f[dataset_name] d = g.create_dataset(dataset_name, data=self) else: d = g.create_dataset(dataset_name, data=self) for k, v in info.items(): d.attrs[k] = v f.close() @classmethod def from_hdf5(cls, filename, dataset_name=None, group_name=None): r"""Attempts read in and convert a dataset in an hdf5 file into a YTArray. Parameters ---------- filename: string The filename to of the hdf5 file. dataset_name: string The name of the dataset to read from. If the dataset has a units attribute, attempt to infer units as well. group_name: string An optional group to read the arrays from. If not specified, the arrays are datasets at the top level by default. 
""" import h5py from yt.extern.six.moves import cPickle as pickle if dataset_name is None: dataset_name = 'array_data' f = h5py.File(filename) if group_name is not None: g = f[group_name] else: g = f dataset = g[dataset_name] data = dataset[:] units = dataset.attrs.get('units', '') if 'unit_registry' in dataset.attrs.keys(): unit_lut = pickle.loads(dataset.attrs['unit_registry'].tostring()) else: unit_lut = None f.close() registry = UnitRegistry(lut=unit_lut, add_default_symbols=False) return cls(data, units, registry=registry) # # Start convenience methods # @property def value(self): """Get a copy of the array data as a numpy ndarray""" return np.array(self) v = value @property def ndview(self): """Get a view of the array data.""" return self.ndarray_view() d = ndview @property def unit_quantity(self): """Get a YTQuantity with the same unit as this array and a value of 1.0""" return YTQuantity(1.0, self.units) uq = unit_quantity @property def unit_array(self): """Get a YTArray filled with ones with the same unit and shape as this array""" return np.ones_like(self) ua = unit_array def __getitem__(self, item): ret = super(YTArray, self).__getitem__(item) if ret.shape == (): return YTQuantity(ret, self.units, bypass_validation=True) else: if hasattr(self, 'units'): ret.units = self.units return ret # # Start operation methods # if LooseVersion(np.__version__) < LooseVersion('1.13.0'): def __add__(self, right_object): """ Add this ytarray to the object on the right of the `+` operator. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "addition") return super(YTArray, self).__add__(ro) def __radd__(self, left_object): """ See __add__. """ lo = sanitize_units_add(self, left_object, "addition") return super(YTArray, self).__radd__(lo) def __iadd__(self, other): """ See __add__. """ oth = sanitize_units_add(self, other, "addition") np.add(self, oth, out=self) return self def __sub__(self, right_object): """ Subtract the object on the right of the `-` from this ytarray. Must check for the correct (same dimension) units. """ ro = sanitize_units_add(self, right_object, "subtraction") return super(YTArray, self).__sub__(ro) def __rsub__(self, left_object): """ See __sub__. """ lo = sanitize_units_add(self, left_object, "subtraction") return super(YTArray, self).__rsub__(lo) def __isub__(self, other): """ See __sub__. """ oth = sanitize_units_add(self, other, "subtraction") np.subtract(self, oth, out=self) return self def __neg__(self): """ Negate the data. """ return super(YTArray, self).__neg__() def __mul__(self, right_object): """ Multiply this YTArray by the object on the right of the `*` operator. The unit objects handle being multiplied. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__mul__(ro) def __rmul__(self, left_object): """ See __mul__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rmul__(lo) def __imul__(self, other): """ See __mul__. """ oth = sanitize_units_mul(self, other) np.multiply(self, oth, out=self) return self def __div__(self, right_object): """ Divide this YTArray by the object on the right of the `/` operator. """ ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__div__(ro) def __rdiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rdiv__(lo) def __idiv__(self, other): """ See __div__. 
""" oth = sanitize_units_mul(self, other) np.divide(self, oth, out=self) return self def __truediv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__truediv__(ro) def __rtruediv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rtruediv__(lo) def __itruediv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.true_divide(self, oth, out=self) return self def __floordiv__(self, right_object): ro = sanitize_units_mul(self, right_object) return super(YTArray, self).__floordiv__(ro) def __rfloordiv__(self, left_object): """ See __div__. """ lo = sanitize_units_mul(self, left_object) return super(YTArray, self).__rfloordiv__(lo) def __ifloordiv__(self, other): """ See __div__. """ oth = sanitize_units_mul(self, other) np.floor_divide(self, oth, out=self) return self def __or__(self, right_object): return super(YTArray, self).__or__(right_object) def __ror__(self, left_object): return super(YTArray, self).__ror__(left_object) def __ior__(self, other): np.bitwise_or(self, other, out=self) return self def __xor__(self, right_object): return super(YTArray, self).__xor__(right_object) def __rxor__(self, left_object): return super(YTArray, self).__rxor__(left_object) def __ixor__(self, other): np.bitwise_xor(self, other, out=self) return self def __and__(self, right_object): return super(YTArray, self).__and__(right_object) def __rand__(self, left_object): return super(YTArray, self).__rand__(left_object) def __iand__(self, other): np.bitwise_and(self, other, out=self) return self def __pow__(self, power): """ Raise this YTArray to some power. Parameters ---------- power : float or dimensionless YTArray. The pow value. """ if isinstance(power, YTArray): if not power.units.is_dimensionless: raise YTUnitOperationError('power', power.unit) # Work around a sympy issue (I think?) # # If I don't do this, super(YTArray, self).__pow__ returns a YTArray # with a unit attribute set to the sympy expression 1/1 rather than # a dimensionless Unit object. if self.units.is_dimensionless and power == -1: ret = super(YTArray, self).__pow__(power) return type(self)(ret, input_units='') return super(YTArray, self).__pow__(power) def __abs__(self): """ Return a YTArray with the abs of the data. """ return super(YTArray, self).__abs__() # # Start comparison operators. # def __lt__(self, other): """ Test if this is less than the object on the right. """ # converts if possible oth = validate_comparison_units(self, other, 'less_than') return super(YTArray, self).__lt__(oth) def __le__(self, other): """Test if this is less than or equal to the object on the right. """ oth = validate_comparison_units(self, other, 'less_than or equal') return super(YTArray, self).__le__(oth) def __eq__(self, other): """ Test if this is equal to the object on the right. """ # Check that other is a YTArray. if other is None: # self is a YTArray, so it can't be None. return False oth = validate_comparison_units(self, other, 'equal') return super(YTArray, self).__eq__(oth) def __ne__(self, other): """ Test if this is not equal to the object on the right. """ # Check that the other is a YTArray. if other is None: return True oth = validate_comparison_units(self, other, 'not equal') return super(YTArray, self).__ne__(oth) def __ge__(self, other): """ Test if this is greater than or equal to other. """ # Check that the other is a YTArray. 
oth = validate_comparison_units( self, other, 'greater than or equal') return super(YTArray, self).__ge__(oth) def __gt__(self, other): """ Test if this is greater than the object on the right. """ # Check that the other is a YTArray. oth = validate_comparison_units(self, other, 'greater than') return super(YTArray, self).__gt__(oth) # # End comparison operators # # # Begin reduction operators # @return_arr def prod(self, axis=None, dtype=None, out=None): if axis is not None: units = self.units**self.shape[axis] else: units = self.units**self.size return super(YTArray, self).prod(axis, dtype, out), units @return_arr def mean(self, axis=None, dtype=None, out=None): return super(YTArray, self).mean(axis, dtype, out), self.units @return_arr def sum(self, axis=None, dtype=None, out=None): return super(YTArray, self).sum(axis, dtype, out), self.units @return_arr def std(self, axis=None, dtype=None, out=None, ddof=0): return super(YTArray, self).std(axis, dtype, out, ddof), self.units def __array_wrap__(self, out_arr, context=None): ret = super(YTArray, self).__array_wrap__(out_arr, context) if isinstance(ret, YTQuantity) and ret.shape != (): ret = ret.view(YTArray) if context is None: if ret.shape == (): return ret[()] else: return ret ufunc = context[0] inputs = context[1] if ufunc in unary_operators: out_arr, inp, u = get_inp_u_unary(ufunc, inputs, out_arr) unit = self._ufunc_registry[context[0]](u) ret_class = type(self) elif ufunc in binary_operators: unit_operator = self._ufunc_registry[context[0]] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (preserve_units, comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class, raise_error=True) unit = unit_operator(*units) if unit_operator in (multiply_units, divide_units): out_arr, out_arr, unit = handle_multiply_divide_units( unit, units, out_arr, out_arr) else: raise RuntimeError( "Support for the %s ufunc has not been added " "to YTArray." % str(context[0])) if unit is None: out_arr = np.array(out_arr, copy=False) return out_arr out_arr.units = unit if out_arr.size == 1: return YTQuantity(np.array(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. 
Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 return YTArray(np.array(out_arr), unit) return ret_class(np.array(out_arr, copy=False), unit) else: # numpy version equal to or newer than 1.13 def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): func = getattr(ufunc, method) if 'out' in kwargs: out_orig = kwargs.pop('out') out = np.asarray(out_orig[0]) else: out = None if len(inputs) == 1: _, inp, u = get_inp_u_unary(ufunc, inputs) out_arr = func(np.asarray(inp), out=out, **kwargs) if ufunc in (multiply, divide) and method == 'reduce': power_sign = POWER_SIGN_MAPPING[ufunc] if 'axis' in kwargs and kwargs['axis'] is not None: unit = u**(power_sign*inp.shape[kwargs['axis']]) else: unit = u**(power_sign*inp.size) else: unit = self._ufunc_registry[ufunc](u) ret_class = type(self) elif len(inputs) == 2: unit_operator = self._ufunc_registry[ufunc] inps, units, ret_class = get_inp_u_binary(ufunc, inputs) if unit_operator in (comparison_unit, arctan2_unit): inps, units = handle_comparison_units( inps, units, ufunc, ret_class) elif unit_operator is preserve_units: inps, units = handle_preserve_units( inps, units, ufunc, ret_class) unit = unit_operator(*units) out_arr = func(np.asarray(inps[0]), np.asarray(inps[1]), out=out, **kwargs) if unit_operator in (multiply_units, divide_units): out, out_arr, unit = handle_multiply_divide_units( unit, units, out, out_arr) else: raise RuntimeError( "Support for the %s ufunc with %i inputs has not been" "added to YTArray." % (str(ufunc), len(inputs))) if unit is None: out_arr = np.array(out_arr, copy=False) elif ufunc in (modf, divmod_): out_arr = tuple((ret_class(o, unit) for o in out_arr)) elif out_arr.size == 1: out_arr = YTQuantity(np.asarray(out_arr), unit) else: if ret_class is YTQuantity: # This happens if you do ndarray * YTQuantity. Explicitly # casting to YTArray avoids creating a YTQuantity with # size > 1 out_arr = YTArray(np.asarray(out_arr), unit) else: out_arr = ret_class(
np.asarray(out_arr), unit) return out_arr
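# A short usage sketch of the conversion API defined above (illustrative
# only; it assumes yt is installed so that YTArray and YTQuantity import
# cleanly, and expected outputs are described rather than shown):
#
# >>> import numpy as np
# >>> from yt.units.yt_array import YTArray, YTQuantity
# >>> a = YTArray(np.arange(3.0), "km")
# >>> b = a.in_units("m")        # converted copy; `a` is unchanged
# >>> a.convert_to_units("m")    # in-place conversion, returns self
# >>> a.to_value("cm")           # bare ndarray with the units stripped
# >>> YTQuantity(1.0e7, "K").to_equivalent("keV", "thermal")  # equivalence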
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
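# Note on the return convention relied on throughout this script (inferred
# from the plotting code itself, not from stated AdvEMDpy documentation):
# empirical_mode_decomposition() returns a tuple whose first element is a
# 2-D array in which row k holds IMF k (only rows 1 onwards are plotted as
# IMFs below), and whose fifth element, as unpacked in the next block,
# holds the knot sequence, e.g.
#
#     imfs = emd.empirical_mode_decomposition(knots=knots_uniform,
#                                             edge_effect='anti-symmetric',
#                                             verbose=False)[0]
#     imf_1, imf_2 = imfs[1, :], imfs[2, :]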
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig,
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, linewidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show()
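# Minimal sketch of the reflection idea illustrated in the figure above
# (illustrative only: the reflect_edge name and the fixed right-hand window
# are choices made here, while AdvEMDpy's edge_effect options add the
# anchoring and discard rules that this sketch omits):

def reflect_edge(time, series, alpha=0.25):
    """Mirror the last alpha*pi of a signal past its right endpoint."""
    mask = time >= time[-1] - alpha * np.pi                # window to mirror
    t_ext = time[-1] + (time[-1] - time[mask])[::-1]       # reflected time stamps
    symmetric = series[mask][::-1]                         # symmetric reflection
    anti_symmetric = symmetric[0] - symmetric              # anti-symmetric, as in the plot above
    return t_ext, symmetric, anti_symmetric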
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
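# Recap of the slope-based construction above (restating the code, not
# adding behaviour): the next maximum is placed one maxima-spacing past the
# last one, t_M = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]), with height
# minima_y[-1] + s1 * (t_M - minima_x[-1]), where s1 joins the second-to-last
# minimum and the last maximum; the next minimum follows analogously using
# the falling slope s2. The "improved" variant computed above instead
# anchors the extrapolation at the final signal value (time[-1],
# time_series[-1]) before extending with s2.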
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
plt.plot(0.95 * np.pi *
np.ones(101)
numpy.ones
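# --- Illustrative aside (not part of the dataset row above) ---
# A minimal sketch of the completed call: `numpy.ones` supplies a constant
# coordinate array, so pairing it with `np.linspace` traces a straight guide
# line, as in the zoomed-region boxes drawn in the prompt. The names below
# (`x_pos`, `y_span`) are assumptions for illustration only.
import numpy as np
import matplotlib.pyplot as plt

x_pos = 0.95 * np.pi
y_span = np.linspace(-5.5, 5.5, 101)
plt.plot(x_pos * np.ones(101), y_span, 'k--')  # vertical dashed line at x_pos
plt.show()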
'''
-------------------------------------------------------------------------------------------------
This code accompanies the paper titled "Human injury-based safety decision of automated vehicles"
Author: <NAME>, <NAME>, <NAME>, <NAME>
Corresponding author: <NAME> (<EMAIL>)
-------------------------------------------------------------------------------------------------
'''
import torch
import numpy as np
from torch import nn
from torch.nn.utils import weight_norm

__author__ = "<NAME>"


def Collision_cond(veh_striking_list, V1_v, V2_v, delta_angle, veh_param):
    ''' Estimate the collision condition. '''
    (veh_l, veh_w, veh_cgf, veh_cgs, veh_k, veh_m) = veh_param
    delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))
    if -1e-6 < delta_angle_2 < 1e-6:
        delta_angle_2 = 1e-6
    delta_v1_list = []
    delta_v2_list = []
    # Estimate the collision condition (delta-v) according to the principal impact direction.
    for veh_striking in veh_striking_list:
        if veh_striking[0] == 1:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgs[1] - veh_striking[3])
            veh_RDS = np.abs(V1_v * np.cos(delta_angle) - V2_v)
            veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(veh_ca + delta_angle_2))
            if (veh_striking[1] + 1) in [16, 1, 2, 3, 17, 20, 21] and (veh_striking[2] + 1) in [16, 1, 2, 3, 17, 20, 21]:
                veh_e = 2 / veh_RDS
            else:
                veh_e = 0.5 / veh_RDS
        elif veh_striking[0] == 2:
            veh_ca = np.arctan(veh_cgf[0] / veh_cgs[0])
            veh_a2 = np.abs(veh_cgf[1] - veh_striking[3])
            veh_a1 = np.abs(np.sqrt(veh_cgf[0] ** 2 + veh_cgs[0] ** 2) * np.cos(delta_angle_2 - veh_ca + np.pi / 2))
            veh_RDS = V1_v *
np.sin(delta_angle_2)
numpy.sin
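# --- Illustrative aside (not part of the dataset row above) ---
# Hedged sketch of the completed expression: `numpy.sin` projects the striking
# vehicle's speed onto the principal impact direction to obtain the lateral
# relative-speed component. The numeric inputs below are assumed values for
# illustration, not taken from the paper's model.
import numpy as np

V1_v = 15.0                                              # assumed speed, m/s
delta_angle = np.deg2rad(30.0)                           # assumed heading difference
delta_angle_2 = np.arccos(np.abs(np.cos(delta_angle)))   # folded into [0, pi/2]
veh_RDS = V1_v * np.sin(delta_angle_2)                   # lateral relative-speed term
print(veh_RDS)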
import numpy as np
from scipy import ndimage


def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
    unique_values = list(np.unique(array))
    # np.bool was removed from NumPy; the builtin bool dtype is equivalent here.
    all_entries_to_keep = np.zeros(shape=array.shape, dtype=bool)
    for unique_value in unique_values:
        entries_of_this_value = array == unique_value
        if unique_value in values_to_ignore:
            all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
        else:
            eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
            all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
    result = array * all_entries_to_keep
    if new_value != 0:
        eroded_entries =
np.logical_not(all_entries_to_keep)
numpy.logical_not
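# --- Illustrative aside (not part of the dataset row above) ---
# Usage sketch for `erode_value_blobs`: `numpy.logical_not` inverts the
# boolean keep-mask so the peeled-off border entries can be addressed.
# The final assignment below is an assumption about the function's elided
# tail, not quoted from it.
import numpy as np

all_entries_to_keep = np.zeros((5, 5), dtype=bool)
all_entries_to_keep[2, 2] = True               # toy mask: keep only the centre
result = np.full((5, 5), 3)
eroded_entries = np.logical_not(all_entries_to_keep)
result[eroded_entries] = 9                     # assumed use of the inverted mask
print(result)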
from abc import ABCMeta, abstractmethod
import os

from vmaf.tools.misc import make_absolute_path, run_process
from vmaf.tools.stats import ListStats

__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"

import re
import numpy as np
import ast

from vmaf import ExternalProgramCaller, to_list
from vmaf.config import VmafConfig, VmafExternalConfig
from vmaf.core.executor import Executor
from vmaf.core.result import Result
from vmaf.tools.reader import YuvReader


class FeatureExtractor(Executor):
    """
    FeatureExtractor takes in a list of assets, runs feature extraction on
    them, and returns a list of corresponding results. A FeatureExtractor must
    specify a unique type and version combination (by the TYPE and VERSION
    attribute), so that the Result generated by it can be identified.

    A derived class of FeatureExtractor must:
    1) Override TYPE and VERSION
    2) Override _generate_result(self, asset), which calls a command-line
    executable and generates feature scores in a log file.
    3) Override _get_feature_scores(self, asset), which reads the feature
    scores from the log file, and returns the scores in a dictionary format.
    For an example, follow VmafFeatureExtractor.
    """

    __metaclass__ = ABCMeta

    @property
    @abstractmethod
    def ATOM_FEATURES(self):
        raise NotImplementedError

    def _read_result(self, asset):
        result = {}
        result.update(self._get_feature_scores(asset))
        executor_id = self.executor_id
        return Result(asset, executor_id, result)

    @classmethod
    def get_scores_key(cls, atom_feature):
        return "{type}_{atom_feature}_scores".format(
            type=cls.TYPE, atom_feature=atom_feature)

    @classmethod
    def get_score_key(cls, atom_feature):
        return "{type}_{atom_feature}_score".format(
            type=cls.TYPE, atom_feature=atom_feature)

    def _get_feature_scores(self, asset):
        # routine to read the feature scores from the log file, and return
        # the scores in a dictionary format.
        log_file_path = self._get_log_file_path(asset)
        atom_feature_scores_dict = {}
        atom_feature_idx_dict = {}
        for atom_feature in self.ATOM_FEATURES:
            atom_feature_scores_dict[atom_feature] = []
            atom_feature_idx_dict[atom_feature] = 0
        with open(log_file_path, 'rt') as log_file:
            for line in log_file.readlines():
                for atom_feature in self.ATOM_FEATURES:
                    re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature)
                    mo = re.match(re_template, line)
                    if mo:
                        cur_idx = int(mo.group(1))
                        assert cur_idx == atom_feature_idx_dict[atom_feature]
                        # parse value, allowing NaN and inf
                        val = float(mo.group(2))
                        if np.isnan(val) or np.isinf(val):
                            val = None
                        atom_feature_scores_dict[atom_feature].append(val)
                        atom_feature_idx_dict[atom_feature] += 1
                        continue
        len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]])
        assert len_score != 0
        for atom_feature in self.ATOM_FEATURES[1:]:
            assert len_score == len(atom_feature_scores_dict[atom_feature]), \
                "Feature data possibly corrupt. Run cleanup script and try again."
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) + (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) + (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) + (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) / 4.0 ) # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0') adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0') adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1') adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1') adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2') adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2') adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3') adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3') adm_scale0_scores_key = cls.get_scores_key('adm_scale0') adm_scale1_scores_key = cls.get_scores_key('adm_scale1') adm_scale2_scores_key = cls.get_scores_key('adm_scale2') adm_scale3_scores_key = cls.get_scores_key('adm_scale3') result.result_dict[adm_scale0_scores_key] = list( (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale1_scores_key] = list( (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale2_scores_key] = list( (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale3_scores_key] = list( (np.array(result.result_dict[adm_num_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale3_scores_key]) + cls.ADM_SCALE_CONSTANT) ) # adm3 = \ # (((adm_num_scale0 + ADM_SCALE_CONSTANT) / (adm_den_scale0 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale1 + ADM_SCALE_CONSTANT) / (adm_den_scale1 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale2 + ADM_SCALE_CONSTANT) / (adm_den_scale2 + ADM_SCALE_CONSTANT)) # + ((adm_num_scale3 + ADM_SCALE_CONSTANT) / (adm_den_scale3 + ADM_SCALE_CONSTANT))) / 4.0 adm3_scores_key = cls.get_scores_key('adm3') result.result_dict[adm3_scores_key] = list( ( ((np.array(result.result_dict[adm_num_scale0_scores_key]) + 
cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT)) + ((np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (
np.array(result.result_dict[adm_den_scale2_scores_key])
numpy.array
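# --- Illustrative aside (not part of the dataset row above) ---
# Minimal sketch of the pattern the completion belongs to: per-frame score
# lists are wrapped in `numpy.array` so the numerator/denominator division is
# elementwise, then converted back to a list for the result dict. The key
# names and values below are illustrative assumptions.
import numpy as np

ADM_SCALE_CONSTANT = 0
result_dict = {
    'VMAF_feature_adm_num_scale2_scores': [0.8, 0.9, 0.7],
    'VMAF_feature_adm_den_scale2_scores': [1.0, 1.0, 0.9],
}
result_dict['VMAF_feature_adm_scale2_scores'] = list(
    (np.array(result_dict['VMAF_feature_adm_num_scale2_scores']) + ADM_SCALE_CONSTANT) /
    (np.array(result_dict['VMAF_feature_adm_den_scale2_scores']) + ADM_SCALE_CONSTANT)
)
print(result_dict['VMAF_feature_adm_scale2_scores'])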
import numpy as np
import tensorflow as tf

H = 2
N = 2
M = 3
BS = 10


def my_softmax(arr):
    max_elements = np.reshape(np.max(arr, axis=2), (BS, N, 1))
    arr = arr - max_elements
    exp_array = np.exp(arr)
    print(exp_array)
    sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1))
    return exp_array / sum_array


def masked_softmax(logits, mask, dim):
    """
    Takes masked softmax over given dimension of logits.

    Inputs:
      logits: Numpy array. We want to take softmax over dimension dim.
      mask: Numpy array of same shape as logits.
        Has 1s where there's real data in logits, 0 where there's padding
      dim: int. dimension over which to take softmax

    Returns:
      masked_logits: Numpy array same shape as logits.
        This is the same as logits, but with 1e30 subtracted
        (i.e. very large negative number) in the padding locations.
      prob_dist: Numpy array same shape as logits.
        The result of taking softmax over masked_logits in given dimension.
        Should be 0 in padding locations. Should sum to 1 over given dimension.
    """
    exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30)  # -large where there's padding, 0 elsewhere
    print(exp_mask)
    masked_logits = tf.add(logits, exp_mask)  # where there's padding, set logits to -large
    prob_dist = tf.nn.softmax(masked_logits, dim)
    return masked_logits, prob_dist


def test_build_similarity(contexts, questions):
    w_sim_1 = tf.get_variable('w_sim_1', initializer=w_1)  # 2 * H
    w_sim_2 = tf.get_variable('w_sim_2', initializer=w_2)  # 2 * self.hidden_size
    w_sim_3 = tf.get_variable('w_sim_3', initializer=w_3)  # 2 * self.hidden_size
    q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1])  # N x BS x M x 2H
    q_tile = tf.transpose(q_tile, (1, 0, 3, 2))  # BS x N x 2H x M
    contexts = tf.expand_dims(contexts, -1)  # BS x N x 2H x 1
    result = (contexts * q_tile)  # BS x N x 2H x M
    tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M])
    result = tf.transpose(result, (0, 1, 3, 2))  # BS x N x M x 2H
    result = tf.reshape(result, (-1, N * M, 2 * H))  # BS x (NxM) x 2H
    tf.assert_equal(tf.shape(result), [BS, N * M, 2 * H])
    # w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1])
    # w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1])
    # w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1])
    term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2 * H)), tf.expand_dims(w_sim_1, -1))  # BS x N
    term1 = tf.reshape(term1, (-1, N))
    term2 = tf.matmul(tf.reshape(questions, (BS * M, 2 * H)), tf.expand_dims(w_sim_2, -1))  # BS x M
    term2 = tf.reshape(term2, (-1, M))
    term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2 * H)), tf.expand_dims(w_sim_3, -1))
    term3 = tf.reshape(term3, (-1, N, M))  # BS x N x M
    S = tf.reshape(term1, (-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M))
    return S


def test_build_sim_mask():
    context_mask = np.array([True, True])  # BS x N
    question_mask =
np.array([True, True, False])
numpy.array
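# --- Illustrative aside (not part of the dataset row above) ---
# NumPy-only sketch of the masked-softmax idea documented in the row above
# (the row itself uses TF1): `numpy.array` builds the boolean padding mask,
# and adding -1e30 at masked positions drives their probability to ~0.
import numpy as np

logits = np.array([[1.0, 2.0, 3.0]])
mask = np.array([True, True, False])           # False marks padding
exp_mask = (1 - mask.astype('float64')) * (-1e30)
masked_logits = logits + exp_mask
shifted = masked_logits - masked_logits.max(axis=-1, keepdims=True)
prob_dist = np.exp(shifted) / np.exp(shifted).sum(axis=-1, keepdims=True)
print(prob_dist)                               # last entry ~0; row sums to 1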
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cntk as C import numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is not detected. ' 'CNTK\'s CPU version is not fully optimized,' 'please run with GPU to get better performance.') # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). # LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor. _LEARNING_PHASE = -1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as symbolic op, to hook up with keras model, # we will create gradient as a constant placeholder, here use this global # map to keep the mapping from grad placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with value %s is not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE = value def clear_session(): """Reset learning phase flag for cntk backend. """ global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK currently don't support cond op, so here we use # element_select approach as workaround. It may have # perf issue, will resolve it later with cntk cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt = alt() if training is True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result = x if training == 1 or training is True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only support float32 and float64 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 else: # cntk only running with float, # try to cast to float to run the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s. ' 'CNTK only supports float32 and ' 'float64.' 
% dtype) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). """ if dtype is None: dtype = floatx() if name is None: name = '' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we don't support init parameter with symbolic op, so eval it first as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0: value = value.astype(dtype) # TODO: remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions' % (bias_dims, dims)) if dims == 4: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 3: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 2: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type is not supported. ' 'CNTK only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension is not supported, at least ' '%d dimensions are needed.' 
% (len(cntk_shape), dynamic_axis_num))
    if name is None:
        name = ''
    cntk_shape = cntk_shape[dynamic_axis_num:]
    x = C.input(
        shape=cntk_shape,
        dtype=_convert_string_dtype(dtype),
        is_sparse=sparse,
        name=name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    x._cntk_placeholder = True
    return x


def is_placeholder(x):
    """Returns whether `x` is a placeholder.

    # Arguments
        x: A candidate placeholder.

    # Returns
        Boolean.
    """
    return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder


def is_keras_tensor(x):
    if not is_tensor(x):
        raise ValueError('Unexpectedly found an instance of type `' +
                         str(type(x)) + '`. '
                         'Expected a symbolic tensor instance.')
    return hasattr(x, '_keras_history')


def is_tensor(x):
    return isinstance(x, (C.variables.Constant,
                          C.variables.Variable,
                          C.variables.Parameter,
                          C.ops.functions.Function))


def shape(x):
    shape = list(int_shape(x))
    num_dynamic = _get_dynamic_axis_num(x)
    non_dyn_shape = []
    for i in range(len(x.shape)):
        if shape[i + num_dynamic] is None:
            non_dyn_shape.append(x.shape[i])
        else:
            non_dyn_shape.append(shape[i + num_dynamic])
    return shape[:num_dynamic] + non_dyn_shape


def is_sparse(tensor):
    return tensor.is_sparse


def int_shape(x):
    if hasattr(x, '_keras_shape'):
        return x._keras_shape
    shape = x.shape
    if hasattr(x, 'dynamic_axes'):
        dynamic_shape = [None for a in x.dynamic_axes]
        shape = tuple(dynamic_shape) + shape
    return shape


def ndim(x):
    shape = int_shape(x)
    return len(shape)


def _prepare_name(name, default):
    prefix = '_'.join(NAME_SCOPE_STACK)
    if name is None or name == '':
        return prefix + '/' + default
    return prefix + '/' + name


def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const


def random_binomial(shape, p=0.0, dtype=None, seed=None):
    # use numpy workaround now
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e7)
    np.random.seed(seed)
    if dtype is None:
        dtype = np.float32
    else:
        dtype = _convert_string_dtype(dtype)
    size = 1
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. '
                             'Please provide fixed dimension '
                             'instead of `None`.')
        size *= _
    binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
    return variable(value=binomial, dtype=dtype)


def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. 
' 'Please provide fixed dimension ' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' scale = (high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if dtype is None: dtype = floatx() for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') # how to apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1, 10e6) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def eye(size, dtype=None, name=None): if dtype is None: dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x, dtype=None, name=None): return zeros_like(x) + 1 def count_params(x): for _ in x.shape: if _ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend: `count_params` with dynamic ' 'shape is not supported. Please provide ' 'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk calculate everything in float, so don't need case from bool / int return x def dot(x, y): if len(x.shape) > 2 or len(y.shape) > 2: y_shape = int_shape(y) if len(y_shape) > 2: permutation = [len(y_shape) - 2] permutation += list(range(len(y_shape) - 2)) permutation += [len(y_shape) - 1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1) else: return C.times(x, y) def batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y) if isinstance(axes, int): axes = (axes, axes) if axes is None: # behaves like tf.batch_matmul as default axes = [len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a in axes]): raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) if len(x_shape) == 2 and len(y_shape) == 2: if axes[0] == axes[1]: result = sum(x * y, axis=axes[0], keepdims=True) return result if axes[0] == 1 else transpose(result) else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0] while i < len(x.shape) - 1: x = C.swapaxes(x, i, i + 1) i += 1 i = normalized_axis[1] while i > 0: y = C.swapaxes(y, i, i - 1) i -= 1 result = C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1) if len(y_shape) == 2: result = squeeze(result, -1) return result def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): # There is a bug in cntk gather op which may cause crash. # We have made a fix but not catched in CNTK 2.1 release. # Will update with gather op in next release if _get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix = C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis, list): # sequence axis is removed by default, so don't need reshape on it reduce_axes = [] for a in axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq = False for a in axis: if isinstance(a, C.Axis): has_seq = True break if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return x def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims) def min(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output, axis, keepdims) def sum(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output, axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_prod') return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m = mean(x, axis, keepdims=True) devs_squared = C.square(x - m) return mean(devs_squared, axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if axis >= 0 else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple( [C.InferredDimension if _ is None else _ for _ in new_shape]) result = C.reshape(x, new_shape) if index < nones: result._keras_shape = shape return result def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list): axis = [axis] shape = list(int_shape(x)) _axis = [] for _ in axis: if isinstance(_, int): _axis.append(_ if _ >= 0 else _ + len(shape)) if len(_axis) == 0: return x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del shape[_] new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension 
else _ for _ in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if isinstance(n, int): n = (n,) elif isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if len(n) < len(shape): n = tuple([1 for _ in range(len(shape) - len(n))]) + n if len(n) != len(shape): raise NotImplementedError i = num_dynamic_axis for i, rep in enumerate(n): if i >= num_dynamic_axis and shape[i] is not None: tmp = [x] * rep x = C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return x def _normalize_axis(axis, x): shape = int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s` has ' '%d cntk dynamic axis, this is not expected, please ' 'double check the keras shape history.' % (str(shape), nones)) # Current cntk does not support shape like (1, batch). so using the workaround # here to mapping the correct axis. Will remove this tricky after we add support # in native cntk op cntk_axis = [] dynamic_axis_index = 0 for i in range(ndim): if shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index < nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index += 1 while i < len(cntk_axis): cntk_axis[i] -= nones i += 1 if isinstance(axis, tuple): _axis = list(axis) elif isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis = list(axis) else: _axis = axis if isinstance(_axis, list): for i, a in enumerate(_axis): if a is not None and a < 0: _axis[i] = (a % ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is None: _axis = C.Axis.all_axes() return _axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis = [_ + len(shape) if _ < 0 else _ for _ in axis] if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1: result = x for index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result else: for index in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape] return C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result = sum(x, axis, keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None, keepdims=False): reduce_result = prod(x, axis, keepdims=keepdims) all_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax( target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def argmin(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) 
return _reshape_dummy_dim(output, axis) def square(x): return C.square(x) def abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x): return x / C.abs(x) def pow(x, a): return C.pow(x, a) def clip(x, min_value, max_value): if max_value is not None and max_value < min_value: max_value = min_value if max_value is None: max_value = np.inf if min_value is None: min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output = C.sigmoid(output) output = C.clip(output, epsilon(), 1.0 - epsilon()) output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output) return output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def moving_average_update(variable, value, momentum): return C.assign(variable, variable * momentum + value * (1. - momentum)) def update_add(x, increment): result = x + increment return C.assign(x, result) def gradients(loss, variables): # cntk does not support gradients as symbolic op, # to hook up with keras model # we will return a constant as place holder, the cntk learner will apply # the gradient during training. global grad_parameter_dict if isinstance(variables, list) is False: variables = [variables] grads = [] for v in variables: g = C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x, y) def greater(x, y): return C.greater(x, y) def greater_equal(x, y): return C.greater_equal(x, y) def less(x, y): return C.less(x, y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y): return C.element_max(x, y) def minimum(x, y): return C.element_min(x, y) def sin(x): return C.sin(x) def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None: if beta is None: gamma = ones_like(x) else: gamma = ones_like(beta) if beta is None: if gamma is None: beta = zeros_like(x) else: beta = zeros_like(gamma) mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant, beta, gamma, epsilon) else: # need broadcasting target_shape = [] x_shape = int_shape(x) # skip the batch axis for axis in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma) > axis: gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta = C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes = tuple(axes) if shift is None: shift = x # Compute true mean while keeping the dims for proper broadcasting. 
for axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if not keep_dims: mean = squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean, variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): # The mean / var / beta / gamma may be processed by broadcast # so it may have an extra batch axis with 1, it is not needed # in cntk, need to remove those dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1: gamma = _reshape_dummy_dim(gamma, [0]) if beta is None: beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0] == 1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta def concatenate(tensors, axis=-1): if len(tensors) == 0: return None axis = [axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def reshape(x, shape): shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]) if isinstance(x, C.variables.Parameter): return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1: # collapse axis with batch axis if b_any(_ == C.InferredDimension for _ in x.shape) or b_any( _ == C.FreeDimension for _ in x.shape): warnings.warn( 'Warning: CNTK backend does not support ' 'collapse of batch axis with inferred dimension. ' 'The reshape did not take place.') return x return _reshape_batch(x, shape) else: # no collapse, then first need to padding the shape if num_dynamic_axis >= len(shape): i = 0 while i < len(shape): if shape[i] is None or shape[i] == -1: i += 1 else: break shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape new_shape = list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i in range(dims)] else: current_layout = tuple([i for i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern %s ' 'requested permute on dynamic axis, ' 'which is not supported. Please do permute ' 'on static axis.' 
% pattern) axis = list(pattern) axis = axis[num_dynamic_axis:] axis = _normalize_axis(axis, x) return C.transpose(x, axis) def resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output elif data_format == 'channels_last': output = repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep, axis): axis = _normalize_axis(axis, x) axis = axis[0] slices = [] shape = x.shape i = 0 while i < shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1) for _ in range(rep): slices.append(tmp) i += 1 return C.splice(*slices, axis=axis) def repeat(x, n): # this is a workaround for recurrent layer # if n is inferred dimension, # we can't figure out how to repeat it in cntk now # return the same x to take cntk broadcast feature # to make the recurrent layer work. # need to be fixed in GA. if n is C.InferredDimension or n is C.FreeDimension: return x index = 1 - _get_dynamic_axis_num(x) if index < 0 or index > 1: raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x, new_shape) temp = [x] * n return C.splice(*temp, axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims < 3: raise ValueError('Input should be at least 3D.') # if the second axis is static axis, CNTK will do unroll by default if shape[1] is None: raise ValueError('CNTK Backend: the input of static rnn ' 'has shape `%s`, the second axis ' 'is not static. If you want to run ' 'rnn with non-static axis, please try ' 'dynamic rnn with sequence axis.' 
% shape) if constants is None: constants = [] if mask is not None: mask_shape = int_shape(mask) if len(mask_shape) == dims - 1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states = tuple(initial_states) outputs = [] time_axis = 1 - nones if nones > 0 else 1 if go_backwards: i = shape[1] - 1 while i >= 0: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, time_axis) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states i -= 1 else: i = 0 while i < shape[1]: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, 1) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i += 1 i = 1 # add the time_step axis back final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i < len(outputs): # add the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global uses_learning_phase uses_learning_phase = False if dims < 3: raise ValueError('CNTK Backend: the input of rnn has only rank %d ' 'Need at least rank 3 to run RNN.' % dims) if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants is None: constants = [] num_time_step = shape[1] if num_time_step is None and not has_seq_axis(inputs): num_time_step = inputs.shape[0] initial = [] for s in initial_states: if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not has_seq_axis(inputs) if go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with ' 'variable-length sequences. 
Please specify a ' 'static length for your sequences.') rnn_inputs = inputs if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants = [] for constant in constants: if isinstance(constant, list): new_c = [] for c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask is not None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x, states, m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values = [] for s, p in zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase uses_learning_phase = True if m is not None: new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)] n_s = [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s) > 0: new_output = n_s[0] return new_output, n_s final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step is not None and num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s)) else: f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0]))) else: f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0])) return x / norm def hard_sigmoid(x): x = (0.2 * x) + 0.5 x = C.clip(x, 0.0, 1.0) return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution( kernel, x, strides=tuple(strides), auto_padding=[ False, padding]) if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if 
data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid strides for dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + 
str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = strides + (strides[0],) x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[3] shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] = output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part = C.relu(-x) x = C.relu(x) if max_value is not None: x = C.clip(x, 0.0, max_value) if alpha != 0.: x -= alpha * negative_part return x def dropout(x, level, noise_shape=None, seed=None): if level < 0. or level >= 1: raise ValueError('CNTK Backend: Invalid dropout level %s, ' 'must be in interval [0, 1].' 
% level) return C.dropout(x, level) def batch_flatten(x): # cntk's batch axis is not in shape, # so just flatten all the dim in x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim) return x def softmax(x, axis=-1): return C.softmax(x, axis=axis) def softplus(x): return C.softplus(x) def softsign(x): return x / (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch, 1), while keras expect (batch, ) return C.reshape(result, ()) else: # scale preds so that the class probas of each sample sum to 1 output /= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer = None self.unrelated_updates = None self.updates = updates if len(updates) > 0: assert len(outputs) > 0 self.loss = outputs[0] # need group update by gradient place holder u_ops = [] unrelated_updates = [] for update in updates: if isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u = update if len(u.arguments) == 0: u_ops.append(u) else: unrelated_updates.append(u) update_func = C.combine([u.output for u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list = [] for g in grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient node `%s` which is not ' 'related to any parameters in the model. ' 'Please double check how the gradient node ' 'is constructed.' 
% g) if len(u_list) > 0: learner = C.cntk_py.universal_learner(p_list, u_list, update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) > 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _ in unrelated_updates]) if self.trainer is None: self.metrics_outputs = [f.output for f in outputs] self.metrics_func = C.combine(self.metrics_outputs) # cntk only could handle loss and 1 metric in trainer, for metrics more # than 2, need manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i != p and p != C.InferredDimension and p != C.FreeDimension: return False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple)) feed_dict = {} for tensor, value in zip(self.placeholders, inputs): # cntk only support calculate on float, do auto cast here if (hasattr(value, 'dtype') and value.dtype != np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # in current version cntk can't support input with variable # length. Will support it in next release. if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has been resolved ' 'to shape `%s`, but input shape is `%s`. Currently ' 'CNTK can not take variable length inputs. Please ' 'pass inputs that have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated = [] if self.trainer is not None: input_dict = {} for argument in self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is not found in inputs. ' 'Please double check the model and inputs in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) == 2) outputs = result[1] for o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict = {} for argument in self.metrics_func.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) # Some ops (like dropout) won't be applied during "eval" in cntk. # They only evaluated in training phase. To make it work, call # "forward" method to let cntk know we want to evaluate them.from # But the assign ops won't be executed under this mode, that's why # we need this check. 
if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in self.metrics_outputs: value = output_values[o] v = value.asarray() updated.append(v) else: v = output_values.asarray() for o in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict = {} for argument in self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if num_dynamic_axis > 0: assert len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else: x = _padding(x, padding, 0) else: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x = _padding(x, padding, 1) return x def _padding(x, pattern, axis): base_shape = x.shape if b_any([dim < 0 for dim in base_shape]): raise ValueError('CNTK Backend: padding input tensor with ' 'shape `%s` contains non-specified dimension, ' 'which is not supported. Please give fixed ' 'dimension to enable padding.' % base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1] > 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) return x def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): assert len(padding) == 3 assert 
len(padding[0]) == 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) x = _padding(x, padding[2], 4) else: if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) x = _padding(x, padding[2], 2) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if isinstance( x, C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return eval(x) def batch_get_value(xs): result = [] for x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape, value, dtype=floatx()) x.value = value else: raise NotImplementedError def print_tensor(x, message=''): return C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def batch_set_value(tuples): for t in tuples: x = t[0] value = t[1] if isinstance(value, np.ndarray) is False: value = np.asarray(value) if isinstance(x, C.variables.Parameter): x.value = value else: raise NotImplementedError def stop_gradient(variables): if isinstance(variables, (list, tuple)): return map(C.stop_gradient, variables) else: return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond = ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond > ndim_expr: raise ValueError('Rank of condition should be less' ' than or equal to rank of then and' ' else expressions. 
ndim(condition)=' + str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr = int_shape(then_expression) ndim_diff = ndim_expr - ndim_cond for i in range(ndim_diff): condition = expand_dims(condition) condition = tile(condition, shape_expr[ndim_cond + i]) return C.element_select(condition, then_expression, else_expression) def elu(x, alpha=1.): res = C.elu(x) if alpha == 1: return res else: return C.element_select(C.greater(x, 0), res, alpha * res) def in_top_k(predictions, targets, k): _targets = C.one_hot(targets, predictions.shape[-1]) result = C.classification_error(predictions, _targets, topN=k) return 1 - C.reshape(result, shape=()) def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None): if name is None: name = '%s_alias' % x.name return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, rows, cols) # TF input shape: (samples, rows, cols, input_depth) x = C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): # As of Keras 2.0.0, all kernels are normalized # on the format `(rows, cols, input_depth, depth)`, # independently of `data_format`. # CNTK expects `(depth, input_depth, rows, cols)`. kernel = C.transpose(kernel, (3, 2, 0, 1)) return kernel def _preprocess_border_mode(padding): if padding == 'same': padding = True elif padding == 'valid': padding = False else: raise ValueError('Invalid border mode: ' + str(padding)) return padding def _postprocess_conv2d_output(x, data_format): if data_format == 'channels_last': x = C.transpose(x, (1, 2, 0)) return x def _preprocess_conv3d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. 
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3) # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, # input_depth) x = C.transpose(x, (3, 0, 1, 2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4, 3, 0, 1, 2)) return kernel def _postprocess_conv3d_output(x, dim_ordering): if dim_ordering == 'channels_last': x = C.transpose(x, (1, 2, 3, 0)) return x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1] == C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for a in axis: if isinstance(a, C.Axis) \ and a != C.Axis.default_batch_axis() \ and hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x, time_step): tmp_shape = list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape) def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs = [] for i in range(output_length): slice_length = slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to output_filters first, to apply broadcast weight = permute_dimensions(kernel, (2, 0, 1)) # Shape: (batch, filters, output_length, input_length * kernel_size) output = x_aggregate * weight # Shape: (batch, filters, output_length) output = sum(output, axis=3) # Shape: (batch, output_length, filters) return permute_dimensions(output, (0, 2, 1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters = kernel_shape xs = [] for i in range(output_row): for j in range(output_col): slice_row = slice(i * stride_row, i * stride_row + kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append(reshape(inputs[:, :, slice_row, slice_col], (-1, 1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to put filters first weight = permute_dimensions(kernel, (2, 0, 1)) # shape: batch, filters, output_length, input_length * kernel_size output = x_aggregate * weight # shape: batch, filters, output_length output = sum(output, axis=3) # shape: batch, filters, row, col output = reshape(output, (-1, filters, output_row, output_col)) if data_format == 'channels_last': # shape: batch, row, col, filters output = permute_dimensions(output, (0, 2, 3, 1)) return output def reverse(x, axes): if isinstance(axes, int): axes = [axes] cntk_axes = _normalize_axis(axes, x) 
begin_index = [0 for _ in cntk_axes] end_index = [0 for _ in cntk_axes] strides = [-1 for _ in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def _reshape_batch(x, shape): # there is a bug in cntk 2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else: return C.user_function(ReshapeBatch(x, shape[1:])) def _get_cntk_version(): version = C.__version__ if version.endswith('+'): version = version[:-1] # hot fix: ignore all the '.' except the first one. if len(version) > 2 and version[1] == '.': version = version[:2] + version[2:].replace('.', '') try: return float(version) except ValueError: warnings.warn( 'CNTK backend warning: CNTK version not detected. ' 'Will use CNTK 2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name) self.from_shape = input.shape self.target_shape = shape def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape)) num_static_element = np.prod(np.asarray(self.target_shape)) num_batch = int(num_element / num_static_element) result = arguments.data().as_shape((num_batch,) + self.target_shape) return None, C.cntk_py.Value(result) def backward(self, state, root_gradients): grad_array_view = root_gradients.data() num_element = root_gradients.shape()[0] * np.prod(
np.asarray(self.target_shape))
numpy.asarray
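A note on the non-logits branch of `categorical_crossentropy` above: it renormalizes the predictions per sample, clips them away from 0 and 1 for numerical stability, and reduces along the class axis. The following is a minimal NumPy sketch of those three steps, not the backend's implementation; `EPSILON` and the function name are stand-ins for the backend's `epsilon()` helper.

import numpy as np

EPSILON = 1e-7  # hypothetical stand-in for the backend's epsilon()

def categorical_crossentropy_np(target, output):
    # Scale predictions so the class probabilities of each sample sum to 1.
    output = output / output.sum(axis=-1, keepdims=True)
    # Clip away from 0 and 1 to avoid log(0) and numerical instability.
    output = np.clip(output, EPSILON, 1.0 - EPSILON)
    # Per-sample cross-entropy, shape (batch,), matching the reshaped CNTK result.
    return -(target * np.log(output)).sum(axis=-1)

target = np.array([[0., 1., 0.]])
probs = np.array([[0.2, 0.5, 0.3]])
print(categorical_crossentropy_np(target, probs))  # [0.69314718]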
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 =
np.linalg.norm(pts2[1]-pts2[3])
numpy.linalg.norm
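The fold perturbation in `save_img` above hinges on one geometric idea: every pixel is displaced by `perturbed_v`, scaled by a weight `omega_perturbed` that decays with the pixel's projected distance from a randomly chosen fold line. A small self-contained sketch of that computation follows; the variable names mirror the source, but the 0-1 normalization is a crude stand-in for the class's `get_normalize`/`get_0_1_d` helpers and the constants are illustrative.

import numpy as np

h, w = 8, 8
pixel_position = np.argwhere(np.zeros((h, w), dtype=np.uint32) == 0).reshape(h, w, 2)

perturbed_p = np.array([3.0, 3.0])    # a point on the fold line
perturbed_pp = np.array([6.0, 7.0])   # a second point defining the line
perturbed_v = np.array([2.0, -1.0])   # displacement applied at the fold itself

perturbed_vp = perturbed_pp - perturbed_p
# Projection of (perturbed_p - pixel) onto the fold direction, which the
# source uses as each pixel's distance from the crease.
dist = np.dot(perturbed_p - pixel_position, perturbed_vp) / np.linalg.norm(perturbed_vp)

alpha_perturbed = 1.0                            # illustrative value
d = np.abs(dist) / np.abs(dist).max()            # crude 0-1 normalization (stand-in)
omega_perturbed = alpha_perturbed / (d + alpha_perturbed)  # the fold_curve_random branch

# Per-pixel shift, accumulated into perturbed_xy_ in the source.
offsets = np.stack([omega_perturbed * perturbed_v[0],
                    omega_perturbed * perturbed_v[1]], axis=-1)
print(offsets.shape)  # (8, 8, 2)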
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regresssion for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. assert
np.all(self.lq.value == lq_value)
numpy.all
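As a quick orientation for readers of the test suite above, this is the user-facing behaviour being asserted: a `Magnitude` is a function quantity whose `.physical` attribute round-trips to the wrapped physical quantity, and arithmetic combines the physical units. A short usage sketch, assuming only the public astropy.units API exercised by these tests; the numbers are illustrative.

import numpy as np
import astropy.units as u

flux = np.array([1., 10., 100.]) * u.Jy
mags = u.Magnitude(flux)                 # function quantity with unit mag(Jy)
print(mags.unit)                         # mag(Jy)
print(mags.physical.to_value(u.Jy))      # [  1.  10. 100.] -- round-trip

# Subtraction divides the physical units, as TestLogUnitArithmetic checks:
ratio = mags - u.Magnitude(1. * u.Jy)
print(ratio.unit.physical_unit == u.dimensionless_unscaled)  # True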
# pvtrace is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # pvtrace is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import numpy as np from external.transformations import translation_matrix, rotation_matrix import external.transformations as tf from Trace import Photon from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm from Materials import Spectrum def random_spherical_vector(): # This method of calculating isotropic vectors is taken from the GNU Scientific Library LOOP = True while LOOP: x = -1. + 2. * np.random.uniform() y = -1. + 2. * np.random.uniform() s = x**2 + y**2 if s <= 1.0: LOOP = False z = -1. + 2. * s a = 2 * np.sqrt(1 - s) x = a * x y = a * y return np.array([x,y,z]) class SimpleSource(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False): super(SimpleSource, self).__init__() self.position = position self.direction = direction self.wavelength = wavelength self.use_random_polarisation = use_random_polarisation self.throw = 0 self.source_id = "SimpleSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength # If use_random_polarisation is set, generate a random polarisation vector for the photon if self.use_random_polarisation: # Randomise a vector in the xy-plane, then transform from +z to the direction of the photon vec = random_spherical_vector() vec[2] = 0. vec = norm(vec) R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1]) photon.polarisation = transform_direction(vec, R) else: photon.polarisation = None photon.id = self.throw self.throw = self.throw + 1 return photon class Laser(object): """A light source that will generate photons of a single colour, direction and position.""" def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None): super(Laser, self).__init__() self.position = np.array(position) self.direction = np.array(direction) self.wavelength = wavelength assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation) self.throw = 0 self.source_id = "LaserSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.position = np.array(self.position) photon.direction = np.array(self.direction) photon.active = True photon.wavelength = self.wavelength photon.polarisation = self.polarisation photon.id = self.throw self.throw = self.throw + 1 return photon class PlanarSource(object): """A box that emits photons from the top surface (normal), sampled from the spectrum.""" def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05): super(PlanarSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.plane = FinitePlane(length=length, width=width) self.length = length self.width = width # direction is the direction that photons are fired out of the plane in the GLOBAL FRAME. # i.e. this is passed directly to the photon to set is's direction self.direction = direction self.throw = 0 self.source_id = "PlanarSource_" + str(id(self)) def translate(self, translation): self.plane.append_transform(tf.translation_matrix(translation)) def rotate(self, angle, axis): self.plane.append_transform(tf.rotation_matrix(angle, axis)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Create a point which is on the surface of the finite plane in it's local frame x = np.random.uniform(0., self.length) y = np.random.uniform(0., self.width) local_point = (x, y, 0.) # Transform the direciton photon.position = transform_point(local_point, self.plane.transform) photon.direction = self.direction photon.active = True if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSource(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. 
""" def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSource, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.throw = 0 self.source_id = "LensSource_" + str(id(self)) def photon(self): photon = Photon() photon.source = self.source_id photon.id = self.throw self.throw = self.throw + 1 # Position x = np.random.uniform(self.planeorigin[0],self.planeextent[0]) y = np.random.uniform(self.planeorigin[1],self.planeextent[1]) z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) photon.position = np.array((x,y,z)) # Direction focuspoint = np.array((0.,0.,0.)) focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize) focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize) focuspoint[2] = photon.position[2] direction = focuspoint - photon.position modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5 photon.direction = direction/modulus # Wavelength if self.spectrum != None: photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform()) else: photon.wavelength = self.wavelength return photon class LensSourceAngle(object): """ A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize". The focus line should be perpendicular to the plane normal and aligned with the z-axis. For this lense an additional z-boost is added (Angle of incidence in z-direction). """ def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)): super(LensSourceAngle, self).__init__() self.spectrum = spectrum self.wavelength = wavelength self.planeorigin = planeorigin self.planeextent = planeextent self.linepoint = np.array(linepoint) self.linedirection = np.array(linedirection) self.focussize = focussize self.angle = angle self.throw = 0 self.source_id = "LensSourceAngle_" + str(id(self)) def photon(self): photon = Photon() photon.id = self.throw self.throw = self.throw + 1 # Position x =
np.random.uniform(self.planeorigin[0],self.planeextent[0])
numpy.random.uniform
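For reference, here is a standalone sketch of the rejection sampler used by random_spherical_vector above (Marsaglia's method, as implemented in the GNU Scientific Library). It mirrors the source's z = -1 + 2s convention; the function name is mine and the snippet is illustrative only.

import numpy as np

def isotropic_direction():
    """Unit vector uniformly distributed on the sphere (Marsaglia, 1972)."""
    while True:
        x = np.random.uniform(-1.0, 1.0)
        y = np.random.uniform(-1.0, 1.0)
        s = x * x + y * y
        if s <= 1.0:           # accept points inside the unit disc
            break
    z = -1.0 + 2.0 * s         # s is uniform on [0, 1], so z is uniform on [-1, 1]
    a = 2.0 * np.sqrt(1.0 - s)
    return np.array([a * x, a * y, z])

# sanity check: (a*x)**2 + (a*y)**2 + z**2 simplifies to 1 exactly
v = isotropic_direction()
assert abs(np.linalg.norm(v) - 1.0) < 1e-12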
import numpy as np import sys import os from PIL import Image from visu.helper_functions import save_image from scipy.spatial.transform import Rotation as R from helper import re_quat import copy import torch import k3d class Visualizer(): def __init__(self, p_visu, writer=None): if p_visu[-1] != '/': p_visu = p_visu + '/' self.p_visu = p_visu self.writer = writer if not os.path.exists(self.p_visu): os.makedirs(self.p_visu) def plot_estimated_pose(self, tag, epoch, img, points, trans=[[0, 0, 0]], rot_mat=[[1, 0, 0], [0, 1, 0], [0, 0, 1]], cam_cx=0, cam_cy=0, cam_fx=0, cam_fy=0, store=False, jupyter=False, w=2): """ tag := tensorboard tag epoch := tensorboard epoch store := True -> stores the image to the standard path path := if not None, creates the path and stores the image to path/tag.png img := original_image, [width, height, RGB] points := points of the object model [length, x, y, z] trans: [1,3] rot: [3,3] """ img_d = copy.deepcopy(img) points = np.dot(points, rot_mat.T) points = np.add(points, trans[0, :]) for i in range(0, points.shape[0]): p_x = points[i, 0] p_y = points[i, 1] p_z = points[i, 2] u = int(((p_x / p_z) * cam_fx) + cam_cx) v = int(((p_y / p_z) * cam_fy) + cam_cy) try: img_d[v - w:v + w + 1, u - w:u + w + 1, 0] = 0 img_d[v - w:v + w + 1, u - w:u + w + 1, 1] = 255 img_d[v - w:v + w + 1, u - w:u + w + 1, 2] = 0 except: # print("out of bounds") pass if jupyter: display(Image.fromarray(img_d)) if store: #store_ar = (img_d* 255).round().astype(np.uint8) #print("IMAGE D:" ,img_d,img_d.shape ) save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu) if self.writer is not None: self.writer.add_image(tag, img_d.astype( np.uint8), global_step=epoch, dataformats='HWC') def plot_bounding_box(self, tag, epoch, img, rmin=0, rmax=0, cmin=0, cmax=0, str_width=2, store=False, jupyter=False, b=None): """ tag := tensorboard tag epoch := tensorboard epoch store := True -> stores the image to the standard path path := if not None, creates the path and stores the image to path/tag.png img := original_image, [width, height, RGB] """ if isinstance(b, dict): rmin = b['rmin'] rmax = b['rmax'] cmin = b['cmin'] cmax = b['cmax'] # ToDo check Input data img_d = np.array(copy.deepcopy(img)) c = [0, 0, 255] rmin_mi = max(0, rmin - str_width) rmin_ma = min(img_d.shape[0], rmin + str_width) rmax_mi = max(0, rmax - str_width) rmax_ma = min(img_d.shape[0], rmax + str_width) cmin_mi = max(0, cmin - str_width) cmin_ma = min(img_d.shape[1], cmin + str_width) cmax_mi = max(0, cmax - str_width) cmax_ma = min(img_d.shape[1], cmax + str_width) img_d[rmin_mi:rmin_ma, cmin:cmax, :] = c img_d[rmax_mi:rmax_ma, cmin:cmax, :] = c img_d[rmin:rmax, cmin_mi:cmin_ma, :] = c img_d[rmin:rmax, cmax_mi:cmax_ma, :] = c print("STORE", store) img_d = img_d.astype(np.uint8) if store: #store_ar = (img_d* 255).round().astype(np.uint8) save_image(img_d, tag=str(epoch) + tag, p_store=self.p_visu) if jupyter: display(Image.fromarray(img_d)) if self.writer is not None: self.writer.add_image(tag, img_d.astype( np.uint8), global_step=epoch, dataformats='HWC') def plot_pcd(x, point_size=0.005, c='g'): """ x: point_nr,3 """ if c == 'b': k = 245 elif c == 'g': k = 25811000 elif c == 'r': k = 11801000 elif c == 'black': k = 2580 else: k = 2580 colors = np.ones(x.shape[0]) * k plot = k3d.plot(name='points') plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size) plot += plt_points plt_points.shader = '3d' plot.display() def plot_two_pcd(x, y, point_size=0.005, c1='g', c2='r'): if c1 == 'b': k = 245 elif c1 == 'g': k = 25811000 elif c1 == 'r':
k = 11801000 elif c1 == 'black': k = 2580 else: k = 2580 if c2 == 'b': k2 = 245 elif c2 == 'g': k2 = 25811000 elif c2 == 'r': k2 = 11801000 elif c2 == 'black': k2 = 2580 else: k2 = 2580 col1 = np.ones(x.shape[0]) * k col2 = np.ones(y.shape[0]) * k2 plot = k3d.plot(name='points') plt_points = k3d.points(x, col1.astype(np.uint32), point_size=point_size) plot += plt_points plt_points = k3d.points(y, col2.astype(np.uint32), point_size=point_size) plot += plt_points plt_points.shader = '3d' plot.display() class SequenceVisualizer(): def __init__(self, seq_data, images_path, output_path=None): self.seq_data = seq_data self.images_path = images_path self.output_path = output_path def plot_points_on_image(self, seq_no, frame_no, jupyter=False, store=False, pose_type='filtered'): seq_data = self.seq_data images_path = self.images_path output_path = self.output_path frame = seq_data[seq_no][frame_no] unique_desig = frame['dl_dict']['unique_desig'][0] if pose_type == 'ground_truth': # ground truth t = frame['dl_dict']['gt_trans'].reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['dl_dict']['gt_rot_wxyz'][0]), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() elif pose_type == 'filtered': # filter pred t = np.array(frame['filter_pred']['t']).reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['filter_pred']['r_wxyz']), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() elif pose_type == 'final_pred_obs': # final pred t = np.array(frame['final_pred_obs']['t']).reshape(1, 3) rot_quat = re_quat(copy.deepcopy( frame['final_pred_obs']['r_wxyz']), 'wxyz') rot = R.from_quat(rot_quat).as_matrix() else: raise Exception('Pose type not implemented.') w = 2 if type(unique_desig) != str: im = np.array(Image.open( images_path + unique_desig[0] + '-color.png')) # ycb else: im = np.array(Image.open( images_path + unique_desig + '.png')) # laval img_d = copy.deepcopy(im) dl_dict = frame['dl_dict'] points = copy.deepcopy( seq_data[seq_no][0]['dl_dict']['model_points'][0, :, :]) points = np.dot(points, rot.T) points =
np.add(points, t[0, :])
numpy.add
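The completion above is the translation step of a standard rigid-body transform followed by a pinhole projection, which is exactly what both visualizers do point by point. A compact vectorised sketch of that math (function name is mine; cam_fx/cam_fy/cam_cx/cam_cy are assumed intrinsics) might look like this:

import numpy as np

def project_points(points, rot_mat, trans, cam_fx, cam_fy, cam_cx, cam_cy):
    """Apply pose (R, t) to (N, 3) model points and project to (N, 2) pixel coords."""
    cam = np.dot(points, np.asarray(rot_mat).T) + np.asarray(trans).reshape(1, 3)
    u = (cam[:, 0] / cam[:, 2]) * cam_fx + cam_cx   # column coordinate
    v = (cam[:, 1] / cam[:, 2]) * cam_fy + cam_cy   # row coordinate
    return np.stack([u, v], axis=1).astype(int)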
# -*- coding: utf-8 -*- """ Created on Thu Nov 28 12:10:11 2019 @author: Omer """ ## File handler ## This file was initially intended purely to generate the matrices for the near earth code found in: https://public.ccsds.org/Pubs/131x1o2e2s.pdf ## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it. ## The emphasis here is on correctness, I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format. import numpy as np from scipy.linalg import circulant #import matplotlib.pyplot as plt import scipy.io import common import hashlib import os projectDir = os.environ.get('LDPC') if projectDir == None: import pathlib projectDir = pathlib.Path(__file__).parent.absolute() ## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything. import sys sys.path.insert(1, projectDir) FILE_HANDLER_INT_DATA_TYPE = np.int32 GENERAL_CODE_MATRIX_DATA_TYPE = np.int32 NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) def nibbleToHex(inputArray): n = NIBBLE_CONVERTER.dot(inputArray) if n == 10: h = 'A' elif n== 11: h = 'B' elif n== 12: h = 'C' elif n== 13: h = 'D' elif n== 14: h = 'E' elif n== 15: h = 'F' else: h = str(n) return h def binaryArraytoHex(inputArray): d1 = len(inputArray) assert (d1 % 4 == 0) outputArray = np.zeros(d1//4, dtype = str) outputString = '' for j in range(d1//4): nibble = inputArray[4 * j : 4 * j + 4] h = nibbleToHex(nibble) outputArray[j] = h outputString = outputString + h return outputArray, outputString def hexStringToBinaryArray(hexString): outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) for i in hexString: if i == '0': nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '1': nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '2': nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '3': nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '4': nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '5': nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '6': nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '7': nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '8': nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == '9': nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == 'A': nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == 'B': nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == 'C': nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE) elif i == 'D': nibble =
np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
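The if/elif ladders above implement a fixed 16-entry nibble mapping. A table-driven equivalent is shorter and easier to verify; this is an illustrative sketch, not the module's API (the snake_case names stand in for hexStringToBinaryArray / binaryArraytoHex, and the inverse returns only the string rather than the (array, string) pair).

import numpy as np

def hex_string_to_binary_array(hex_string):
    """'D' -> [1, 1, 0, 1], concatenated over all hex digits."""
    return np.array(
        [int(bit) for digit in hex_string
         for bit in format(int(digit, 16), '04b')],
        dtype=np.int32)

def binary_array_to_hex(bits):
    """Inverse mapping; len(bits) must be a multiple of 4."""
    assert len(bits) % 4 == 0
    nibbles = np.asarray(bits, dtype=np.int32).reshape(-1, 4)
    values = nibbles.dot(np.array([8, 4, 2, 1]))    # same weights as NIBBLE_CONVERTER
    return ''.join('0123456789ABCDEF'[v] for v in values)

assert binary_array_to_hex(hex_string_to_binary_array('D4')) == 'D4'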
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is an index of a group to which ith column assigned. The procedure was helpful only if n_groups is significantly less than n. References ---------- .. [1] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is ndarray of shape (n,) (never a scalar even if n=1). It must return 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. Float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then step is selected automatically, see Notes. f0 : None or array_like, optional If not None it is assumed to be equal to ``fun(x0)``, in this case the ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar, in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True. 
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only a few non-zero elements in each row, then it is possible to estimate several of its columns by a single function evaluation [3]_. To perform such economic computations two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that a corresponding element of the Jacobian identically equals zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure, use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True the function returns a `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None then an ndarray with shape (m, n) is returned. If `sparsity` is not None returns a csr_matrix with shape (m, n). For sparse matrices and linear operators it is always returned as a 2-D structure, for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for '2-point' method and s=3 for '3-point' method. Such relative step approximately minimizes a sum of truncation and round-off errors, see [1]_. A finite difference scheme for '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and 3-point forward or backward scheme is used for points near the boundary. Both schemes have the second-order accuracy in terms of Taylor expansion. Refer to [2]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing when m=1 Jacobian is returned with a shape (n,), on the other hand when n=1 Jacobian is returned with a shape (m, 1). Our motivation is the following: a) It handles a case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. ..
[2] <NAME>, <NAME>, and <NAME>, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] <NAME>, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute left and right derivative at point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups =
np.atleast_1d(groups)
numpy.atleast_1d
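The relative_step table in the prompt above encodes the usual truncation/round-off trade-off: EPS**0.5 for the first-order 2-point scheme and EPS**(1/3) for the second-order 3-point scheme. A quick numerical check (illustrative; np.sin at x0 = 1) shows why the higher-order scheme tolerates the larger step.

import numpy as np

EPS = np.finfo(np.float64).eps
x0 = 1.0
exact = np.cos(x0)

h2 = EPS**0.5 * max(1.0, abs(x0))       # 2-point relative step
h3 = EPS**(1/3) * max(1.0, abs(x0))     # 3-point relative step

forward = (np.sin(x0 + h2) - np.sin(x0)) / h2
central = (np.sin(x0 + h3) - np.sin(x0 - h3)) / (2 * h3)

print(abs(forward - exact))   # roughly 1e-8: O(h) truncation balanced against round-off
print(abs(central - exact))   # roughly 1e-11: the O(h**2) scheme does better with a larger h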
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] *
np.ones_like(min_dash_1)
numpy.ones_like
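The plotting script above leans on emd_utils.Utility's first-order finite-difference extrema tests (max_bool_func_1st_order_fd and its minimum counterpart). A minimal sketch of such a test follows; the exact behaviour of the library call is assumed, not copied, so treat this as an illustration only.

import numpy as np

def local_maxima_mask(y):
    """Boolean mask that is True at interior local maxima of a 1-D series."""
    dy = np.diff(y)
    mask = np.zeros(len(y), dtype=bool)
    # a maximum is where the first difference changes sign from positive to negative
    mask[1:-1] = (dy[:-1] > 0) & (dy[1:] < 0)
    return mask

t = np.linspace(0, (5 - 0.25) * np.pi, 1001)
x = np.cos(t) + np.cos(5 * t)
print(t[local_maxima_mask(x)])   # abscissae of maxima like those scattered in the plots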
''' <NAME> set up :2020-1-9 intergrate img and label into one file -- fiducial1024_v1 ''' import argparse import sys, os import pickle import random import collections import json import numpy as np import scipy.io as io import scipy.misc as m import matplotlib.pyplot as plt import glob import math import time import threading import multiprocessing as mp from multiprocessing import Pool import re import cv2 # sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN import utils def getDatasets(dir): return os.listdir(dir) class perturbed(utils.BasePerturbed): def __init__(self, path, bg_path, save_path, save_suffix): self.path = path self.bg_path = bg_path self.save_path = save_path self.save_suffix = save_suffix def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'): origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR) save_img_shape = [512*2, 480*2] # 320 # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1]) reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02]) # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18]) # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09]) base_img_shrink = save_img_shape[0] - reduce_value # enlarge_img_shrink = [1024, 768] # enlarge_img_shrink = [896, 672] # 420 enlarge_img_shrink = [512*4, 480*4] # 420 # enlarge_img_shrink = [896*2, 768*2] # 420 # enlarge_img_shrink = [896, 768] # 420 # enlarge_img_shrink = [768, 576] # 420 # enlarge_img_shrink = [640, 480] # 420 '''''' im_lr = origin_img.shape[0] im_ud = origin_img.shape[1] reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1]) # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14]) if im_lr > im_ud: im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2) im_lr = save_img_shape[0] - reduce_value else: base_img_shrink = save_img_shape[1] - reduce_value im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2) im_ud = base_img_shrink if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5: repeat_time = min(repeat_time, 8) edge_padding = 3 im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1 im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1 im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64) im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64) # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1 # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1 # im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64) # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64) im_x, im_y = np.meshgrid(im_hight, im_wide) segment_x = (im_lr) // (fiducial_points-1) segment_y = (im_ud) // (fiducial_points-1) # plt.plot(im_x, im_y, # color='limegreen', # marker='.', # linestyle='') # plt.grid(True) # plt.show() self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), 
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
    math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
    cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
    '''

    '''clip'''
    perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
    for x in range(self.new_shape[0] // 2, perturbed_x_max):
        if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
            perturbed_x_max = x
            break
    for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
        if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
            perturbed_x_min = x
            break
    for y in range(self.new_shape[1] // 2, perturbed_y_max):
        if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
            perturbed_y_max = y
            break
    for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
        if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
            perturbed_y_min = y
            break

    if perturbed_x_min == 0 or perturbed_x_max == self.new_shape[0] or perturbed_y_min == 0 or perturbed_y_max == self.new_shape[1]:
        raise Exception('clip error')
    if perturbed_x_max - perturbed_x_min < im_lr // 2 or perturbed_y_max - perturbed_y_min < im_ud // 2:
        raise Exception('clip error')

    perfix_ = self.save_suffix + '_' + str(m) + '_' + str(n)
    is_shrink = False
    if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
        is_shrink = True
        synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
        synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
        foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
        foreORbackground_label[foreORbackground_label < 0.99] = 0
        foreORbackground_label[foreORbackground_label >= 0.99] = 1

        '''shrink fiducial points'''
        center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
        fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
        shrink_x = im_lr / (perturbed_x_max - perturbed_x_min)
        shrink_y = im_ud / (perturbed_y_max - perturbed_y_min)
        fiducial_points_coordinate *= [shrink_x, shrink_y]
        center_x_l *= shrink_x
        center_y_l *= shrink_y
        # fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
        # fiducial_points_coordinate[1:, :1, 0] *= shrink_x
        # fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
        # perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max

        perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)

        self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
        self.synthesis_perturbed_label =
np.zeros_like(self.synthesis_perturbed_label)
numpy.zeros_like
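# Minimal usage sketch of the numpy.zeros_like call completed above (toy
# shape, not from the original pipeline): it allocates a zero-filled array
# matching the argument's shape and dtype.
#
#     >>> import numpy as np
#     >>> np.zeros_like(np.ones((2, 3, 2))).shape
#     (2, 3, 2)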
import argparse import json import numpy as np import pandas as pd import os from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,f1_score from keras.models import Sequential from keras.layers import Dense, Dropout from keras import backend as K from keras.utils.vis_utils import plot_model from sklearn.externals import joblib import time def f1(y_true, y_pred): def recall(y_true, y_pred): """Recall metric. Only computes a batch-wise average of recall. Computes the recall, a metric for multi-label classification of how many relevant items are selected. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) recall = true_positives / (possible_positives + K.epsilon()) return recall def precision(y_true, y_pred): """Precision metric. Only computes a batch-wise average of precision. Computes the precision, a metric for multi-label classification of how many selected items are relevant. """ true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1))) precision = true_positives / (predicted_positives + K.epsilon()) return precision precision = precision(y_true, y_pred) recall = recall(y_true, y_pred) return 2*((precision*recall)/(precision+recall+K.epsilon())) def get_embeddings(sentences_list,layer_json): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :return: Dictionary with key each sentence of the sentences_list and as value the embedding ''' sentences = dict()#dict with key the index of each line of the sentences_list.txt and as value the sentence embeddings = dict()##dict with key the index of each sentence and as value the its embedding sentence_emb = dict()#key:sentence,value:its embedding with open(sentences_list,'r') as file: for index,line in enumerate(file): sentences[index] = line.strip() with open(layer_json, 'r',encoding='utf-8') as f: for line in f: embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features']) for key,value in sentences.items(): sentence_emb[value] = embeddings[key] return sentence_emb def train_classifier(sentences_list,layer_json,dataset_csv,filename): ''' :param sentences_list: the path o the sentences.txt :param layer_json: the path of the json file that contains the embeddings of the sentences :param dataset_csv: the path of the dataset :param filename: The path of the pickle file that the model will be stored :return: ''' dataset = pd.read_csv(dataset_csv) bert_dict = get_embeddings(sentences_list,layer_json) length = list() sentence_emb = list() previous_emb = list() next_list = list() section_list = list() label = list() errors = 0 for row in dataset.iterrows(): sentence = row[1][0].strip() previous = row[1][1].strip() nexts = row[1][2].strip() section = row[1][3].strip() if sentence in bert_dict: sentence_emb.append(bert_dict[sentence]) else: sentence_emb.append(np.zeros(768)) print(sentence) errors += 1 if previous in bert_dict: previous_emb.append(bert_dict[previous]) else: previous_emb.append(np.zeros(768)) if nexts in bert_dict: next_list.append(bert_dict[nexts]) else: next_list.append(np.zeros(768)) if section in bert_dict: section_list.append(bert_dict[section]) else: section_list.append(np.zeros(768)) length.append(row[1][4]) label.append(row[1][5]) sentence_emb = np.asarray(sentence_emb) 
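    # Shape note (hedged; the np.zeros(768) fallbacks above imply BERT-base
    # embeddings): each row yields four 768-dim vectors (sentence, previous,
    # next, section). They are concatenated below and column-stacked with the
    # scalar `length`, giving 4 * 768 + 1 = 3073 features per example.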
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(section_emb.shape)
    length = np.asarray(length)
    print(length.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
    features =
np.column_stack([features, length])
numpy.column_stack
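# Minimal usage sketch of the numpy.column_stack call completed above (toy
# shapes, not from the original script): 1-D arrays are appended as columns,
# so stacking a length-n vector onto an (n, 3072) matrix yields (n, 3073).
#
#     >>> import numpy as np
#     >>> np.column_stack([np.zeros((4, 2)), np.arange(4)]).shape
#     (4, 3)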
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cntk as C import numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is not detected. ' 'CNTK\'s CPU version is not fully optimized,' 'please run with GPU to get better performance.') # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). # LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor. _LEARNING_PHASE = -1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as symbolic op, to hook up with keras model, # we will create gradient as a constant placeholder, here use this global # map to keep the mapping from grad placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with value %s is not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE = value def clear_session(): """Reset learning phase flag for cntk backend. """ global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK currently don't support cond op, so here we use # element_select approach as workaround. It may have # perf issue, will resolve it later with cntk cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt = alt() if training is True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result = x if training == 1 or training is True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only support float32 and float64 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 else: # cntk only running with float, # try to cast to float to run the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s. ' 'CNTK only supports float32 and ' 'float64.' 
% dtype) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). """ if dtype is None: dtype = floatx() if name is None: name = '' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we don't support init parameter with symbolic op, so eval it first as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0: value = value.astype(dtype) # TODO: remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions' % (bias_dims, dims)) if dims == 4: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 3: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 2: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type is not supported. ' 'CNTK only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension is not supported, at least ' '%d dimensions are needed.' 
                         % (len(cntk_shape), dynamic_axis_num))

    if name is None:
        name = ''

    cntk_shape = cntk_shape[dynamic_axis_num:]
    x = C.input(
        shape=cntk_shape,
        dtype=_convert_string_dtype(dtype),
        is_sparse=sparse,
        name=name)
    x._keras_shape = shape
    x._uses_learning_phase = False
    x._cntk_placeholder = True
    return x


def is_placeholder(x):
    """Returns whether `x` is a placeholder.

    # Arguments
        x: A candidate placeholder.

    # Returns
        Boolean.
    """
    return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder


def is_keras_tensor(x):
    if not is_tensor(x):
        raise ValueError('Unexpectedly found an instance of type `' +
                         str(type(x)) + '`. '
                         'Expected a symbolic tensor instance.')
    return hasattr(x, '_keras_history')


def is_tensor(x):
    return isinstance(x, (C.variables.Constant,
                          C.variables.Variable,
                          C.variables.Parameter,
                          C.ops.functions.Function))


def shape(x):
    shape = list(int_shape(x))
    num_dynamic = _get_dynamic_axis_num(x)
    non_dyn_shape = []
    for i in range(len(x.shape)):
        if shape[i + num_dynamic] is None:
            non_dyn_shape.append(x.shape[i])
        else:
            non_dyn_shape.append(shape[i + num_dynamic])
    return shape[:num_dynamic] + non_dyn_shape


def is_sparse(tensor):
    return tensor.is_sparse


def int_shape(x):
    if hasattr(x, '_keras_shape'):
        return x._keras_shape

    shape = x.shape
    if hasattr(x, 'dynamic_axes'):
        dynamic_shape = [None for a in x.dynamic_axes]
        shape = tuple(dynamic_shape) + shape
    return shape


def ndim(x):
    shape = int_shape(x)
    return len(shape)


def _prepare_name(name, default):
    prefix = '_'.join(NAME_SCOPE_STACK)
    if name is None or name == '':
        return prefix + '/' + default
    return prefix + '/' + name


def constant(value, dtype=None, shape=None, name=None):
    if dtype is None:
        dtype = floatx()
    if shape is None:
        shape = ()
    np_value = value * np.ones(shape)
    const = C.constant(np_value,
                       dtype=dtype,
                       name=_prepare_name(name, 'constant'))
    const._keras_shape = const.shape
    const._uses_learning_phase = False
    return const


def random_binomial(shape, p=0.0, dtype=None, seed=None):
    # use numpy workaround now
    if seed is None:
        # ensure that randomness is conditioned by the Numpy RNG
        seed = np.random.randint(10e7)
    np.random.seed(seed)
    if dtype is None:
        dtype = np.float32
    else:
        dtype = _convert_string_dtype(dtype)

    size = 1
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. '
                             'Please provide fixed dimension '
                             'instead of `None`.')
        size *= _

    binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape)
    return variable(value=binomial, dtype=dtype)


def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
    for _ in shape:
        if _ is None:
            raise ValueError('CNTK Backend: randomness op with '
                             'dynamic shape is not supported now. 
' 'Please provide fixed dimension ' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' scale = (high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if dtype is None: dtype = floatx() for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') # how to apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1, 10e6) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=
np.ones(shape, ctype)
numpy.ones
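# Minimal usage sketch of the numpy.ones call completed above (toy shape, not
# from the backend itself): the second argument fixes the dtype, which is what
# `_convert_string_dtype` resolves for `zeros()` and `ones()`.
#
#     >>> import numpy as np
#     >>> np.ones((2, 3), np.float32).dtype
#     dtype('float32')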
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
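# Cross-check sketch (hedged; SciPy is used here only for comparison and is
# not a dependency of the figures above): a cardinal cubic B-spline basis,
# like the B_{k,4} curves drawn in plot 1, peaks at 2/3 over its middle knot.
#
#     >>> from scipy.interpolate import BSpline
#     >>> import numpy as np
#     >>> b = BSpline.basis_element(np.arange(5.0))  # 5 knots -> cubic
#     >>> float(np.round(b(2.0), 4))
#     0.6667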
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
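# Filter note (hedged; scipy.stats analogue, not the repo's own Preprocess
# implementation): winsorising clips extreme samples to quantile bounds
# instead of discarding them, which is what the 'Windsorize filter' curves
# above visualise.
#
#     >>> from scipy.stats.mstats import winsorize
#     >>> import numpy as np
#     >>> winsorize(np.array([1, 2, 3, 100]), limits=0.25).data
#     array([2, 2, 3, 3])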
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
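# Smoothing note (hedged description of the two smoothers plotted above):
# Hodrick-Prescott smoothing solves
#     min_tau  sum_t (x_t - tau_t)^2 + lam * sum_t (tau_{t+1} - 2*tau_t + tau_{t-1})^2,
# trading fit against curvature of the trend tau, while Henderson-Whittaker
# smoothing is a weighted moving average whose span is set by `order` (51 here).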
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
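# Standalone sketch of the two edge extensions contrasted in plot 3 above
# (toy signal; the underscore names are illustrative additions):
_sig = np.array([0.0, 1.0, 0.5, 2.0])
_symmetric = np.concatenate([_sig, _sig[-2::-1]])                      # mirror about the last sample
_anti_symmetric = np.concatenate([_sig, 2 * _sig[-1] - _sig[-2::-1]])  # 180-degree rotation about it
# _symmetric      -> [0.  1.  0.5 2.  0.5 1.  0. ]
# _anti_symmetric -> [0.  1.  0.5 2.  3.5 3.  4. ]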
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
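# Geometry note (hedged reading of the construction above): the slope-based
# extension places the next maximum one inter-maximum spacing beyond
# maxima_x[-1] and projects its height from the last minimum along slope s1
# (and the next minimum along s2); the "improved" variant instead anchors the
# projection at the actual end point (time[-1], time_series[-1]), which is why
# it tracks the amplified tail introduced just above.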
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, linewidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.full_like(time_extended, np.nan) time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 *
np.ones(100)
numpy.ones
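# --- Editor's note: the row above illustrates the slope-based edge effect. A
# compact, self-contained restatement of the rule used there (an illustrative
# helper, not part of AdvEMDpy's public API): the next maximum is placed one
# maximum-to-maximum spacing beyond the signal and is extrapolated from the
# last minimum along the slope s1 joining the preceding minimum to the last maximum.
def slope_based_next_maximum(maxima_x, maxima_y, minima_x, minima_y):
    s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
    t_next = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
    return t_next, minima_y[-1] + (t_next - minima_x[-1]) * s1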
import time import h5py import hdbscan import numpy as np import torch from sklearn.cluster import MeanShift from pytorch3dunet.datasets.hdf5 import SliceBuilder from pytorch3dunet.unet3d.utils import get_logger from pytorch3dunet.unet3d.utils import unpad logger = get_logger('UNet3DPredictor') class _AbstractPredictor: def __init__(self, model, loader, output_file, config, **kwargs): self.model = model self.loader = loader self.output_file = output_file self.config = config self.predictor_config = kwargs @staticmethod def _volume_shape(dataset): # TODO: support multiple internal datasets raw = dataset.raws[0] if raw.ndim == 3: return raw.shape else: return raw.shape[1:] @staticmethod def _get_output_dataset_names(number_of_datasets, prefix='predictions'): if number_of_datasets == 1: return [prefix] else: return [f'{prefix}{i}' for i in range(number_of_datasets)] def predict(self): raise NotImplementedError class StandardPredictor(_AbstractPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predictions from the network are kept in memory. If the results from the network don't fit into RAM, use `LazyPredictor` instead. The output dataset names inside the H5 are given by the `des_dataset_name` config argument. If the argument is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def predict(self): out_channels = self.config['model'].get('out_channels') if out_channels is None: out_channels = self.config['model']['dt_out_channels'] prediction_channel = self.config.get('prediction_channel', None) if prediction_channel is not None: logger.info(f"Using only channel '{prediction_channel}' from the network output") device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} batches...') # dimensionality of the output predictions volume_shape = self._volume_shape(self.loader.dataset) if prediction_channel is None: prediction_maps_shape = (out_channels,) + volume_shape else: # single channel prediction map prediction_maps_shape = (1,) + volume_shape logger.info(f'The shape of the output prediction maps (CDHW): {prediction_maps_shape}') avoid_block_artifacts = self.predictor_config.get('avoid_block_artifacts', True) logger.info(f'Avoid block artifacts: {avoid_block_artifacts}') # create destination H5 file h5_output_file = h5py.File(self.output_file, 'w') # allocate prediction and normalization arrays logger.info('Allocating prediction and normalization arrays...') prediction_maps, normalization_masks = self._allocate_prediction_maps(prediction_maps_shape, output_heads, h5_output_file) # Sets the module in evaluation mode explicitly (necessary for batchnorm/dropout layers if present) self.model.eval() # Set the `testing=True` flag, otherwise the final Softmax/Sigmoid won't be applied! 
self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # send batch to device batch = batch.to(device) # forward pass predictions = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: predictions = [predictions] # for each output head for prediction, prediction_map, normalization_mask in zip(predictions, prediction_maps, normalization_masks): # convert to numpy array prediction = prediction.cpu().numpy() # for each batch sample for pred, index in zip(prediction, indices): # save patch index: (C,D,H,W) if prediction_channel is None: channel_slice = slice(0, out_channels) else: channel_slice = slice(0, 1) index = (channel_slice,) + index if prediction_channel is not None: # use only the 'prediction_channel' logger.info(f"Using channel '{prediction_channel}'...") pred = np.expand_dims(pred[prediction_channel], axis=0) logger.info(f'Saving predictions for slice:{index}...') if avoid_block_artifacts: # unpad in order to avoid block artifacts in the output probability maps u_prediction, u_index = unpad(pred, index, volume_shape) # accumulate probabilities into the output prediction array prediction_map[u_index] += u_prediction # count voxel visits for normalization normalization_mask[u_index] += 1 else: # accumulate probabilities into the output prediction array prediction_map[index] += pred # count voxel visits for normalization normalization_mask[index] += 1 # save results to the output file self._save_results(prediction_maps, normalization_masks, output_heads, h5_output_file, self.loader.dataset) # close the output H5 file h5_output_file.close() def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # initialize the output prediction arrays prediction_maps = [np.zeros(output_shape, dtype='float32') for _ in range(output_heads)] # initialize normalization mask in order to average out probabilities of overlapping patches normalization_masks = [np.zeros(output_shape, dtype='uint8') for _ in range(output_heads)] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): # save probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') for prediction_map, normalization_mask, prediction_dataset in zip(prediction_maps, normalization_masks, prediction_datasets): prediction_map = prediction_map / normalization_mask if dataset.mirror_padding: pad_width = dataset.pad_width logger.info(f'Dataset loaded with mirror padding, pad_width: {pad_width}. Cropping before saving...') prediction_map = prediction_map[:, pad_width:-pad_width, pad_width:-pad_width, pad_width:-pad_width] logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=prediction_map, compression="gzip") class LazyPredictor(StandardPredictor): """ Applies the model on the given dataset and saves the result in the `output_file` in the H5 format. Predicted patches are directly saved into the H5 and they won't be stored in memory. Since this predictor is slower than the `StandardPredictor`, it should only be used when the predicted volume does not fit into RAM. The output dataset names inside the H5 are given by the `des_dataset_name` config argument. 
If the argument is not present in the config, 'predictions{n}' is used as a default dataset name, where `n` denotes the number of the output head from the network. Args: model (Unet3D): trained 3D UNet model used for prediction loader (torch.utils.data.DataLoader): input data loader output_file (str): path to the output H5 file config (dict): global config dict """ def __init__(self, model, loader, output_file, config, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) def _allocate_prediction_maps(self, output_shape, output_heads, output_file): # allocate datasets for probability maps prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') prediction_maps = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='float32', chunks=True, compression='gzip') for dataset_name in prediction_datasets] # allocate datasets for normalization masks normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') normalization_masks = [ output_file.create_dataset(dataset_name, shape=output_shape, dtype='uint8', chunks=True, compression='gzip') for dataset_name in normalization_datasets] return prediction_maps, normalization_masks def _save_results(self, prediction_maps, normalization_masks, output_heads, output_file, dataset): if dataset.mirror_padding: logger.warning( f'Mirror padding unsupported in LazyPredictor. Output predictions will be padded with pad_width: {dataset.pad_width}') prediction_datasets = self._get_output_dataset_names(output_heads, prefix='predictions') normalization_datasets = self._get_output_dataset_names(output_heads, prefix='normalization') # normalize the prediction_maps inside the H5 for prediction_map, normalization_mask, prediction_dataset, normalization_dataset in zip(prediction_maps, normalization_masks, prediction_datasets, normalization_datasets): # split the volume into smaller parts and load each into memory separately logger.info(f'Normalizing {prediction_dataset}...') z, y, x = prediction_map.shape[1:] # take slices which are 1/27 of the original volume patch_shape = (z // 3, y // 3, x // 3) for index in SliceBuilder._build_slices(prediction_map, patch_shape=patch_shape, stride_shape=patch_shape): logger.info(f'Normalizing slice: {index}') prediction_map[index] /= normalization_mask[index] # make sure to reset the slice that has been visited already in order to avoid 'double' normalization # when the patches overlap with each other normalization_mask[index] = 1 logger.info(f'Deleting {normalization_dataset}...') del output_file[normalization_dataset] class EmbeddingsPredictor(_AbstractPredictor): """ Applies the embedding model on the given dataset and saves the result in the `output_file` in the H5 format. The resulting volume is the segmentation itself (not the embedding vectors) obtained by clustering embeddings with HDBSCAN or MeanShift algorithm patch by patch and then stitching the patches together. 
""" def __init__(self, model, loader, output_file, config, clustering, iou_threshold=0.7, noise_label=-1, **kwargs): super().__init__(model, loader, output_file, config, **kwargs) self.iou_threshold = iou_threshold self.noise_label = noise_label self.clustering = clustering assert clustering in ['hdbscan', 'meanshift'], 'Only HDBSCAN and MeanShift are supported' logger.info(f'IoU threshold: {iou_threshold}') self.clustering_name = clustering self.clustering = self._get_clustering(clustering, kwargs) def predict(self): device = self.config['device'] output_heads = self.config['model'].get('output_heads', 1) logger.info(f'Running prediction on {len(self.loader)} patches...') # dimensionality of the the output segmentation volume_shape = self._volume_shape(self.loader.dataset) logger.info(f'The shape of the output segmentation (DHW): {volume_shape}') logger.info('Allocating segmentation array...') # initialize the output prediction arrays output_segmentations = [np.zeros(volume_shape, dtype='int32') for _ in range(output_heads)] # initialize visited_voxels arrays visited_voxels_arrays = [np.zeros(volume_shape, dtype='uint8') for _ in range(output_heads)] # Sets the module in evaluation mode explicitly self.model.eval() self.model.testing = True # Run predictions on the entire input dataset with torch.no_grad(): for batch, indices in self.loader: # logger.info(f'Predicting embeddings for slice:{index}') # send batch to device batch = batch.to(device) # forward pass embeddings = self.model(batch) # wrap predictions into a list if there is only one output head from the network if output_heads == 1: embeddings = [embeddings] for prediction, output_segmentation, visited_voxels_array in zip(embeddings, output_segmentations, visited_voxels_arrays): # convert to numpy array prediction = prediction.cpu().numpy() # iterate sequentially because of the current simple stitching that we're using for pred, index in zip(prediction, indices): # convert embeddings to segmentation with hdbscan clustering segmentation = self._embeddings_to_segmentation(pred) # stitch patches self._merge_segmentation(segmentation, index, output_segmentation, visited_voxels_array) # save results with h5py.File(self.output_file, 'w') as output_file: prediction_datasets = self._get_output_dataset_names(output_heads, prefix=f'segmentation/{self.clustering_name}') for output_segmentation, prediction_dataset in zip(output_segmentations, prediction_datasets): logger.info(f'Saving predictions to: {output_file}/{prediction_dataset}...') output_file.create_dataset(prediction_dataset, data=output_segmentation, compression="gzip") def _embeddings_to_segmentation(self, embeddings): """ Cluster embeddings vectors with HDBSCAN and return the segmented volume. Args: embeddings (ndarray): 4D (CDHW) embeddings tensor Returns: 3D (DHW) segmentation """ # shape of the output segmentation output_shape = embeddings.shape[1:] # reshape (C, D, H, W) -> (C, D * H * W) and transpose -> (D * H * W, C) flattened_embeddings = embeddings.reshape(embeddings.shape[0], -1).transpose() logger.info('Clustering embeddings...') # perform clustering and reshape in order to get the segmentation volume start = time.time() clusters = self.clustering.fit_predict(flattened_embeddings).reshape(output_shape) logger.info( f'Number of clusters found by {self.clustering}: {np.max(clusters)}. 
Duration: {time.time() - start} sec.') return clusters def _merge_segmentation(self, segmentation, index, output_segmentation, visited_voxels_array): """ Given the `segmentation` patch, its `index` in the `output_segmentation` array and the array of visited voxels, merge the segmented patch (`segmentation`) into the `output_segmentation`. Args: segmentation (ndarray): segmented patch index (tuple): position of the patch inside `output_segmentation` volume output_segmentation (ndarray): current state of the output segmentation visited_voxels_array (ndarray): array of voxels visited so far (same size as `output_segmentation`); visited voxels will be marked by a number greater than 0 """ index = tuple(index) # get new unassigned label max_label = np.max(output_segmentation) + 1 # make sure there are no clashes between current segmentation patch and the output_segmentation # but keep the noise label noise_mask = segmentation == self.noise_label segmentation += int(max_label) segmentation[noise_mask] = self.noise_label # get the overlap mask in the current patch overlap_mask = visited_voxels_array[index] > 0 # get the new labels inside the overlap_mask new_labels = np.unique(segmentation[overlap_mask]) merged_labels = self._merge_labels(output_segmentation[index], new_labels, segmentation) # relabel new segmentation with the merged labels for current_label, new_label in merged_labels: segmentation[segmentation == new_label] = current_label # update the output_segmentation output_segmentation[index] = segmentation # visit the patch visited_voxels_array[index] += 1 def _merge_labels(self, current_segmentation, new_labels, new_segmentation): def _most_frequent_label(labels): unique, counts =
np.unique(labels, return_counts=True)
numpy.unique
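# --- Editor's note: StandardPredictor above averages overlapping patch
# predictions by accumulating probabilities alongside per-voxel visit counts.
# A minimal stand-alone sketch of that normalization step (the shapes and the
# two overlapping slices are hypothetical):
import numpy as np

volume = np.zeros((1, 8, 8, 8), dtype='float32')   # (C, D, H, W) accumulator
visits = np.zeros((1, 8, 8, 8), dtype='uint8')     # per-voxel visit counts

for z in (slice(0, 6), slice(2, 8)):               # two overlapping patches
    index = (slice(None), z, slice(None), slice(None))
    patch = np.ones((1, 6, 8, 8), dtype='float32') # stand-in network output
    volume[index] += patch
    visits[index] += 1

averaged = volume / visits                         # overlaps are averaged, not summed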
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regression for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def 
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert
np.all(q == lq)
numpy.all
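# --- Editor's note: a short usage sketch of the logarithmic units exercised by
# the tests above (standard astropy API; the numeric values are illustrative).
import numpy as np
from astropy import units as u

m = -21.1 * u.STmag                 # an ST magnitude, as in test_predefined_magnitudes
print(m.physical)                   # ~1 erg / (cm2 s Angstrom)
lq = u.Dex(np.array([1., 10., 100.]) * u.cm / u.s**2)
print(lq.value)                     # [0., 1., 2.] in dex(cm / s2)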
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of columns enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is the index of the group to which the ith column is assigned. The procedure is helpful only if n_groups is significantly less than n. References ---------- .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A =
np.atleast_2d(A)
numpy.atleast_2d
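# --- Editor's note: a tiny numeric illustration of _compute_absolute_step above.
# The default relative step is EPS**0.5 for '2-point' and EPS**(1/3) for
# '3-point'; it is scaled by max(1, |x0|) and carries the sign of x0.
import numpy as np

EPS = np.finfo(np.float64).eps
x0 = np.array([-2.0, 0.0, 0.5])
rel_step = EPS ** 0.5                                # '2-point' default
sign_x0 = (x0 >= 0).astype(float) * 2 - 1
h = rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
# h is approximately [-2*sqrt(EPS), sqrt(EPS), sqrt(EPS)]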
import copy import functools import itertools import numbers import warnings from collections import defaultdict from datetime import timedelta from distutils.version import LooseVersion from typing import ( Any, Dict, Hashable, Mapping, Optional, Sequence, Tuple, TypeVar, Union, ) import numpy as np import pandas as pd import xarray as xr # only for Dataset and DataArray from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils from .indexing import ( BasicIndexer, OuterIndexer, PandasIndexAdapter, VectorizedIndexer, as_indexable, ) from .npcompat import IS_NEP18_ACTIVE from .options import _get_keep_attrs from .pycompat import ( cupy_array_type, dask_array_type, integer_types, is_duck_dask_array, ) from .utils import ( OrderedSet, _default, decode_numpy_dict_values, drop_dims_from_indexers, either_dict_or_kwargs, ensure_us_time_resolution, infix_dims, is_duck_array, ) NON_NUMPY_SUPPORTED_ARRAY_TYPES = ( ( indexing.ExplicitlyIndexed, pd.Index, ) + dask_array_type + cupy_array_type ) # https://github.com/python/mypy/issues/224 BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore VariableType = TypeVar("VariableType", bound="Variable") """Type annotation to be used when methods of Variable return self or a copy of self. When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the output as an instance of the subclass. Usage:: class Variable: def f(self: VariableType, ...) -> VariableType: ... """ class MissingDimensionsError(ValueError): """Error class used when we can't safely guess a dimension name.""" # inherits from ValueError for backward compatibility # TODO: move this to an xarray.exceptions module? def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]": """Convert an object into a Variable. Parameters ---------- obj : object Object to convert into a Variable. - If the object is already a Variable, return a shallow copy. - Otherwise, if the object has 'dims' and 'data' attributes, convert it into a new Variable. - If all else fails, attempt to convert the object into a Variable by unpacking it into the arguments for creating a new Variable. name : str, optional If provided: - `obj` can be a 1D array, which is assumed to label coordinate values along a dimension of this given name. - Variables with name matching one of their dimensions are converted into `IndexVariable` objects. Returns ------- var : Variable The newly created variable. """ from .dataarray import DataArray # TODO: consider extending this method to automatically handle Iris and if isinstance(obj, DataArray): # extract the primary Variable from DataArrays obj = obj.variable if isinstance(obj, Variable): obj = obj.copy(deep=False) elif isinstance(obj, tuple): try: obj = Variable(*obj) except (TypeError, ValueError) as error: # use .format() instead of % because it handles tuples consistently raise error.__class__( "Could not convert tuple of form " "(dims, data[, attrs, encoding]): " "{} to Variable.".format(obj) ) elif utils.is_scalar(obj): obj = Variable([], obj) elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None: obj = Variable(obj.name, obj) elif isinstance(obj, (set, dict)): raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj))) elif name is not None: data = as_compatible_data(obj) if data.ndim != 1: raise MissingDimensionsError( "cannot set variable %r with %r-dimensional data " "without explicit dimension names. Pass a tuple of " "(dims, data) instead." 
% (name, data.ndim) ) obj = Variable(name, data, fastpath=True) else: raise TypeError( "unable to convert object into a variable without an " "explicit list of dimensions: %r" % obj ) if name is not None and name in obj.dims: # convert the Variable into an Index if obj.ndim != 1: raise MissingDimensionsError( "%r has more than 1-dimension and the same name as one of its " "dimensions %r. xarray disallows such variables because they " "conflict with the coordinates used to label " "dimensions." % (name, obj.dims) ) obj = obj.to_index_variable() return obj def _maybe_wrap_data(data): """ Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure they can be indexed properly. NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should all pass through unmodified. """ if isinstance(data, pd.Index): return PandasIndexAdapter(data) return data def _possibly_convert_objects(values): """Convert arrays of datetime.datetime and datetime.timedelta objects into datetime64 and timedelta64, according to the pandas convention. Also used for validating that datetime64 and timedelta64 objects are within the valid date range for ns precision, as pandas will raise an error if they are not. """ return np.asarray(pd.Series(values.ravel())).reshape(values.shape) def as_compatible_data(data, fastpath=False): """Prepare and wrap data to put in a Variable. - If data does not have the necessary attributes, convert it to ndarray. - If data has dtype=datetime64, ensure that it has ns precision. If it's a pandas.Timestamp, convert it to datetime64. - If data is already a pandas or xarray object (other than an Index), just use the values. Finally, wrap it up with an adapter if necessary. """ if fastpath and getattr(data, "ndim", 0) > 0: # can't use fastpath (yet) for scalars return _maybe_wrap_data(data) if isinstance(data, Variable): return data.data if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES): return _maybe_wrap_data(data) if isinstance(data, tuple): data = utils.to_0d_object_array(data) if isinstance(data, pd.Timestamp): # TODO: convert, handle datetime objects, too data = np.datetime64(data.value, "ns") if isinstance(data, timedelta): data = np.timedelta64(getattr(data, "value", data), "ns") # we don't want nested self-described arrays data = getattr(data, "values", data) if isinstance(data, np.ma.MaskedArray): mask = np.ma.getmaskarray(data) if mask.any(): dtype, fill_value = dtypes.maybe_promote(data.dtype) data = np.asarray(data, dtype=dtype) data[mask] = fill_value else: data = np.asarray(data) if not isinstance(data, np.ndarray): if hasattr(data, "__array_function__"): if IS_NEP18_ACTIVE: return data else: raise TypeError( "Got an NumPy-like array type providing the " "__array_function__ protocol but NEP18 is not enabled. " "Check that numpy >= v1.16 and that the environment " 'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to ' '"1"' ) # validate whether the data is valid data types. data =
np.asarray(data)
numpy.asarray
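# --- Editor's note: a minimal sketch of the (dims, data) tuple path handled by
# as_variable above, using the public xarray API (names are illustrative).
import numpy as np
import xarray as xr

# as_variable(("x", [1, 2, 3])) unpacks the tuple into Variable(*obj):
v = xr.Variable(("x",), np.arange(3))
w = xr.Variable(("x",), np.arange(3), attrs={"units": "m"})
# a Variable named after one of its dimensions becomes an IndexVariable,
# which is what the name-handling branch at the end of as_variable enforces.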
import matplotlib.pyplot as plt import numpy as np from fears.utils import results_manager, plotter, dir_manager import os suffix = '07212021_0001' data_folder = 'results_' + suffix exp_info_file = 'experiment_info_' + suffix + '.p' exp_folders,exp_info = results_manager.get_experiment_results(data_folder, exp_info_file) max_cells = exp_info.populations[0].max_cells n_sims = exp_info.n_sims k_abs = exp_info.slopes exp_folders.reverse() k_abs = np.flip(k_abs) fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4)) pop = exp_info.populations[0] ax = ax.reshape((len(k_abs),)) axnum = 0 tc_axes=[] drug_axes=[] for exp in exp_folders: k_abs_t = exp[exp.find('=')+1:] k_abs_t = float(k_abs_t) num = np.argwhere(k_abs == k_abs_t) num = num[0,0] # generate timecourse axes tcax = ax[axnum] # da = tcax.twinx() sim_files = os.listdir(path=exp) sim_files = sorted(sim_files) survive_count = 0 counts_total = None k=0 while k < len(sim_files): # for sim in sim_files: sim = sim_files[k] sim = exp + os.sep + sim data = results_manager.get_data(sim) dc = data[:,-1] data = data[:,0:-1] # data = data/np.max(data) data_t = data[-1,:] # check whether any genotype still has at least one cell at the final timepoint if any(data_t >= 1): survive_count += 1 if counts_total is None: counts_total = data else: counts_total += data # data = data/np.max(data) # exp_info.populations[num].counts_log_scale = True data = data/max_cells if k==0: drug_kwargs = {'alpha':0.7, 'color':'black', 'linewidth':2, 'label':'Drug Concentration ($\u03BC$M)' } tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num], data, tcax, drug_curve=dc, drug_ax_sci_notation=True, drug_kwargs=drug_kwargs, legend_labels=False, grayscale=True, color='gray', linewidth=1, labelsize=12, alpha=0.7 ) drug_ax.set_ylabel('') drug_axes.append( drug_ax ) else: tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num], data, tcax, grayscale=True, color='gray', legend_labels=False, linewidth=2, labelsize=12, alpha=0.2 ) # drug_ax.set_ylim(0,10**4) k+=1 if survive_count > 0: counts_avg = counts_total/survive_count # counts_avg = counts_avg/np.max(counts_avg) # counts_avg = counts_total counts_avg = counts_avg/
np.max(counts_avg)
numpy.max
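# --- Editor's note: the loop above accumulates genotype count matrices only
# for simulations that survive, then averages and rescales. A stand-alone
# restatement with synthetic data in place of results_manager.get_data:
import numpy as np

sims = [2 * np.random.rand(100, 4) for _ in range(10)]  # stand-in (time x genotype)
counts_total, survive_count = None, 0
for data in sims:
    if np.any(data[-1, :] >= 1):                        # survival at final timepoint
        survive_count += 1
        counts_total = data if counts_total is None else counts_total + data
if survive_count > 0:
    counts_avg = counts_total / survive_count
    counts_avg = counts_avg / np.max(counts_avg)        # normalize to [0, 1]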
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
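# The slope-based edge effect illustrated next (plot 4) extends the signal past the right boundary
# by reusing the final observed slopes: s1 runs through the second-to-last minimum and the last
# maximum, s2 through the last maximum and the last minimum, and each projects a synthetic
# extremum spaced like the final observed extrema.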
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
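# The dashed guides constructed below mark both extrapolations in the annotated figure: the plain
# slope-based extrema computed above, and the improved variant that anchors the extension at the
# signal's boundary value before projecting the next minimum with slope s2.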
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # NaN-filled buffer; the signal is inserted into the centre below
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
plt.savefig('jss_figures/DFA_different_trends.png')
plt.show()

# plot 6b
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
for knot in knots_51:
    axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[0].set_ylim(-5.5, 5.5)
axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
for knot in knots_31:
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].set_ylim(-5.5, 5.5)
axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[2].plot(time, time_series, label='Time series')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([np.pi, (3 / 2) * np.pi])
axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].set_ylim(-5.5, 5.5)
axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi)
plt.savefig('jss_figures/DFA_different_trends_zoomed.png')
plt.show()

hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False)

# plot 6c
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50))
x_hs, y, z = hs_outputs
z_min, z_max = 0, np.abs(z).max()
ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3)
ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3)
ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3)
ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi])
ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
plt.ylabel(r'Frequency (rad.s$^{-1}$)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/DFA_hilbert_spectrum.png')
plt.show()

# plot 6d
time = np.linspace(0, 5 * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 51)
fluc = Fluctuation(time=time, time_series=time_series)
max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False)
max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True)
min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False)
min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True)
util = Utility(time=time, time_series=time_series)
maxima = util.max_bool_func_1st_order_fd()
minima = util.min_bool_func_1st_order_fd()
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50))
plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2)
plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10)
plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10)
plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange')
plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red')
plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan')
plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue')
for knot in knots[:-1]:
    plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1)
plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1)
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0,
box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png') plt.show() # plot 7 a = 0.25 width = 0.2 time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001) knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11) time_series = np.cos(time) +
np.cos(5 * time)
numpy.cos
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division, print_function)

from ...extern import six
from ...extern.six.moves import zip

import pickle
import itertools

import pytest
import numpy as np
from numpy.testing.utils import assert_allclose

from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c

lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)


class TestLogUnitCreation(object):

    def test_logarithmic_units(self):
        """Check logarithmic units are set up correctly."""
        assert u.dB.to(u.dex) == 0.1
        assert u.dex.to(u.mag) == -2.5
        assert u.mag.to(u.dB) == -4

    @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
    def test_callable_units(self, lu_unit, lu_cls):
        assert isinstance(lu_unit, u.UnitBase)
        assert callable(lu_unit)
        assert lu_unit._function_unit_class is lu_cls

    @pytest.mark.parametrize('lu_unit', lu_units)
    def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
        lu = lu_unit()
        assert lu == lu._default_function_unit  # e.g., MagUnit() == u.mag
        assert lu._default_function_unit == lu  # and u.mag == MagUnit()

    @pytest.mark.parametrize('lu_unit, physical_unit',
                             itertools.product(lu_units, pu_sample))
    def test_call_units(self, lu_unit, physical_unit):
        """Create a LogUnit subclass using the callable unit and physical unit,
        and do a basic check that the output is right."""
        lu1 = lu_unit(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit

    def test_call_invalid_unit(self):
        with pytest.raises(TypeError):
            u.mag([])
        with pytest.raises(ValueError):
            u.mag(u.mag())

    @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
        lu_subclasses + [u.LogUnit], pu_sample))
    def test_subclass_creation(self, lu_cls, physical_unit):
        """Create a LogUnit subclass object for a given physical unit,
        and do a basic check that the output is right."""
        lu1 = lu_cls(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit

        lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit)
        assert lu2.physical_unit == physical_unit
        assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)

        with pytest.raises(ValueError):
            lu_cls(physical_unit, u.m)


def test_predefined_magnitudes():
    assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA)
    assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz)
    assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
    assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2))


def test_predefined_reinitialisation():
    assert u.mag('ST') == u.STmag
    assert u.mag('AB') == u.ABmag
    assert u.mag('Bol') == u.M_bol
    assert u.mag('bol') == u.m_bol


def test_predefined_string_roundtrip():
    """Ensure roundtripping; see #5015"""
    with u.magnitude_zero_points.enable():
        assert u.Unit(u.STmag.to_string()) == u.STmag
        assert u.Unit(u.ABmag.to_string()) == u.ABmag
        assert u.Unit(u.M_bol.to_string()) == u.M_bol
        assert u.Unit(u.m_bol.to_string()) == u.m_bol


def test_inequality():
    """Check __ne__ works (regression for #5342)."""
    lu1 = u.mag(u.Jy)
    lu2 = u.dex(u.Jy)
    lu3 = u.mag(u.Jy**2)
    lu4 = lu3 - lu1
    assert lu1 != lu2
    assert lu1 != lu3
    assert lu1 == lu4


class TestLogUnitStrings(object):

    def
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert np.all(self.lq.value == lq_fv.value) def test_quantity_view(self): # Cannot view as Quantity, since the unit cannot be represented. with pytest.raises(TypeError): self.lq.view(u.Quantity) # But a dimensionless one is fine. q2 = self.lq2.view(u.Quantity) assert q2.unit is u.mag assert np.all(q2.value == self.lq2.value) lq3 = q2.view(u.Magnitude) assert type(lq3.unit) is u.MagUnit assert lq3.unit.physical_unit == u.dimensionless_unscaled assert np.all(lq3 == self.lq2) class TestLogQuantitySlicing(object): def test_item_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 11.)*u.Jy) assert lq1[9] == u.Magnitude(10.*u.Jy) lq1[2] = 100.*u.Jy assert lq1[2] == u.Magnitude(100.*u.Jy) with pytest.raises(u.UnitsError): lq1[2] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2] = u.Magnitude(100.*u.m) assert lq1[2] == u.Magnitude(100.*u.Jy) def test_slice_get_and_set(self): lq1 = u.Magnitude(np.arange(1., 10.)*u.Jy) lq1[2:4] = 100.*u.Jy assert np.all(lq1[2:4] == u.Magnitude(100.*u.Jy)) with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.m with pytest.raises(u.UnitsError): lq1[2:4] = 100.*u.mag with pytest.raises(u.UnitsError): lq1[2:4] = u.Magnitude(100.*u.m) assert np.all(lq1[2] == u.Magnitude(100.*u.Jy)) class TestLogQuantityArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other quantities is only possible when the physical unit is dimensionless, and that this turns the result into a normal quantity.""" lq = u.Magnitude(np.arange(1., 11.)*u.Jy) with pytest.raises(u.UnitsError): lq * (1.*u.m) with pytest.raises(u.UnitsError): (1.*u.m) * lq with pytest.raises(u.UnitsError): lq / lq for unit in (u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lq / unit lq2 = u.Magnitude(np.arange(1, 11.)) with pytest.raises(u.UnitsError): lq2 * lq with pytest.raises(u.UnitsError): lq2 / lq with pytest.raises(u.UnitsError): lq / lq2 # but dimensionless_unscaled can be cancelled r = lq2 / u.Magnitude(2.) assert r.unit == u.dimensionless_unscaled assert np.all(r.value == lq2.value/2.) # with dimensionless, normal units OK, but return normal quantities tf = lq2 * u.m tr = u.m * lq2 for t in (tf, tr): assert not isinstance(t, type(lq2)) assert t.unit == lq2.unit.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lq2.unit.physical_unit) t = tf / (50.*u.cm) # now we essentially have the same quantity but with a prefactor of 2 assert t.unit.is_equivalent(lq2.unit.function_unit) assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2) @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogQuantities to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (say, mag**2) is incompatible.""" lq = u.Magnitude(np.arange(1., 4.)*u.Jy) if power == 0: assert np.all(lq ** power == 1.) elif power == 1: assert
np.all(lq ** power == lq)
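The behaviour these tests pin down is easiest to see in a tiny, self-contained sketch (illustrative only, not part of the test suite; it uses only the public astropy.units API exercised above):

import numpy as np
import astropy.units as u

m = 2.5 * u.mag(u.Jy)          # a Magnitude quantity with physical unit Jy
phys = m.physical              # 0.1 Jy, since 2.5 mag is a factor 10**(-0.4 * 2.5)
assert np.isclose(phys.value, 0.1)
assert np.isclose(phys.to(u.mag(u.Jy)).value, 2.5)   # round-trips back to 2.5 mag(Jy)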
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1],
            c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1],
            c='grey', label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2,
         label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10),
         zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
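The reflection trick this figure illustrates can be sketched in a few lines. The helper below is a hypothetical reimplementation for illustration (the name reflect_edge is not from AdvEMDpy):

import numpy as np

def reflect_edge(time, series, anti=False):
    """Mirror a signal about its final time stamp (symmetric edge effect).

    With anti=True the mirrored values are additionally point-reflected
    about the boundary value, giving the anti-symmetric variant.
    """
    time_reflected = 2 * time[-1] - time[::-1]      # mirrored time axis
    series_reflected = series[::-1].copy()          # mirrored signal values
    if anti:
        series_reflected = 2 * series[-1] - series_reflected
    return time_reflected, series_reflected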
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4,
            label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4,
            label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4,
            label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4,
            label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4,
            label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4,
            label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet',
         label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen',
         label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.zeros_like(time_extended) / 0
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)]
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(
np.linspace(0.95 * np.pi, 1.55 * np.pi, 101)
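For reference, the decomposition call pattern used throughout the script above, reduced to a minimal sketch (argument values are illustrative; only calls already shown are used):

import numpy as np
from AdvEMDpy import EMD

t = np.linspace(0, 5 * np.pi, 1001)
x = np.cos(2 * t) + np.cos(4 * t) + np.cos(8 * t)
emd = EMD(time=t, time_series=x)
# The script unpacks the first three returned arrays as IMFs (imfs),
# Hilbert transforms (hts) and instantaneous frequencies (ifs).
imfs, hts, ifs = emd.empirical_mode_decomposition(knots=np.linspace(0, 5 * np.pi, 51),
                                                  max_imfs=3,
                                                  edge_effect='symmetric_anchor',
                                                  verbose=False)[:3]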
"""Routines for numerical differentiation.""" from __future__ import division import numpy as np from numpy.linalg import norm from scipy.sparse.linalg import LinearOperator from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find from ._group_columns import group_dense, group_sparse EPS = np.finfo(np.float64).eps def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub): """Adjust final difference scheme to the presence of bounds. Parameters ---------- x0 : ndarray, shape (n,) Point at which we wish to estimate derivative. h : ndarray, shape (n,) Desired finite difference steps. num_steps : int Number of `h` steps in one direction required to implement finite difference scheme. For example, 2 means that we need to evaluate f(x0 + 2 * h) or f(x0 - 2 * h) scheme : {'1-sided', '2-sided'} Whether steps in one or both directions are required. In other words '1-sided' applies to forward and backward schemes, '2-sided' applies to center schemes. lb : ndarray, shape (n,) Lower bounds on independent variables. ub : ndarray, shape (n,) Upper bounds on independent variables. Returns ------- h_adjusted : ndarray, shape (n,) Adjusted step sizes. Step size decreases only if a sign flip or switching to one-sided scheme doesn't allow to take a full step. use_one_sided : ndarray of bool, shape (n,) Whether to switch to one-sided scheme. Informative only for ``scheme='2-sided'``. """ if scheme == '1-sided': use_one_sided = np.ones_like(h, dtype=bool) elif scheme == '2-sided': h = np.abs(h) use_one_sided = np.zeros_like(h, dtype=bool) else: raise ValueError("`scheme` must be '1-sided' or '2-sided'.") if np.all((lb == -np.inf) & (ub == np.inf)): return h, use_one_sided h_total = h * num_steps h_adjusted = h.copy() lower_dist = x0 - lb upper_dist = ub - x0 if scheme == '1-sided': x = x0 + h_total violated = (x < lb) | (x > ub) fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist) h_adjusted[violated & fitting] *= -1 forward = (upper_dist >= lower_dist) & ~fitting h_adjusted[forward] = upper_dist[forward] / num_steps backward = (upper_dist < lower_dist) & ~fitting h_adjusted[backward] = -lower_dist[backward] / num_steps elif scheme == '2-sided': central = (lower_dist >= h_total) & (upper_dist >= h_total) forward = (upper_dist >= lower_dist) & ~central h_adjusted[forward] = np.minimum( h[forward], 0.5 * upper_dist[forward] / num_steps) use_one_sided[forward] = True backward = (upper_dist < lower_dist) & ~central h_adjusted[backward] = -np.minimum( h[backward], 0.5 * lower_dist[backward] / num_steps) use_one_sided[backward] = True min_dist = np.minimum(upper_dist, lower_dist) / num_steps adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist)) h_adjusted[adjusted_central] = min_dist[adjusted_central] use_one_sided[adjusted_central] = False return h_adjusted, use_one_sided relative_step = {"2-point": EPS**0.5, "3-point": EPS**(1/3), "cs": EPS**0.5} def _compute_absolute_step(rel_step, x0, method): if rel_step is None: rel_step = relative_step[method] sign_x0 = (x0 >= 0).astype(float) * 2 - 1 return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0)) def _prepare_bounds(bounds, x0): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, x0.shape) if ub.ndim == 0: ub = np.resize(ub, x0.shape) return lb, ub def group_columns(A, order=0): """Group columns of a 2-D matrix for sparse finite differencing [1]_. Two columns are in the same group if in each row at least one of them has zero. A greedy sequential algorithm is used to construct groups. 
Parameters ---------- A : array_like or sparse matrix, shape (m, n) Matrix of which to group columns. order : int, iterable of int with shape (n,) or None Permutation array which defines the order of column enumeration. If int or None, a random permutation is used with `order` used as a random seed. Default is 0, that is, use a random permutation but guarantee repeatability. Returns ------- groups : ndarray of int, shape (n,) Contains values from 0 to n_groups-1, where n_groups is the number of found groups. Each value ``groups[i]`` is the index of the group to which the ith column is assigned. The procedure is helpful only if n_groups is significantly smaller than n. References ---------- .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. """ if issparse(A): A = csc_matrix(A) else: A = np.atleast_2d(A) A = (A != 0).astype(np.int32) if A.ndim != 2: raise ValueError("`A` must be 2-dimensional.") m, n = A.shape if order is None or np.isscalar(order): rng = np.random.RandomState(order) order = rng.permutation(n) else: order = np.asarray(order) if order.shape != (n,): raise ValueError("`order` has incorrect shape.") A = A[:, order] if issparse(A): groups = group_sparse(m, n, A.indices, A.indptr) else: groups = group_dense(m, n, A) groups[order] = groups.copy() return groups def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None, bounds=(-np.inf, np.inf), sparsity=None, as_linear_operator=False, args=(), kwargs={}): """Compute finite difference approximation of the derivatives of a vector-valued function. If a function maps from R^n to R^m, its derivatives form an m-by-n matrix called the Jacobian, where an element (i, j) is a partial derivative of f[i] with respect to x[j]. Parameters ---------- fun : callable Function of which to estimate the derivatives. The argument x passed to this function is an ndarray of shape (n,) (never a scalar even if n=1). It must return a 1-D array_like of shape (m,) or a scalar. x0 : array_like of shape (n,) or float Point at which to estimate the derivatives. A float will be converted to a 1-D array. method : {'3-point', '2-point', 'cs'}, optional Finite difference method to use: - '2-point' - use the first order accuracy forward or backward difference. - '3-point' - use central difference in interior points and the second order accuracy forward or backward difference near the boundary. - 'cs' - use a complex-step finite difference scheme. This assumes that the user function is real-valued and can be analytically continued to the complex plane. Otherwise, produces bogus results. rel_step : None or array_like, optional Relative step size to use. The absolute step size is computed as ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to fit into the bounds. For ``method='3-point'`` the sign of `h` is ignored. If None (default) then the step is selected automatically, see Notes. f0 : None or array_like, optional If not None, it is assumed to be equal to ``fun(x0)``; in this case ``fun(x0)`` is not called. Default is None. bounds : tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each bound must match the size of `x0` or be a scalar; in the latter case the bound will be the same for all variables. Use it to limit the range of function evaluation. Bounds checking is not implemented when `as_linear_operator` is True.
sparsity : {None, array_like, sparse matrix, 2-tuple}, optional Defines a sparsity structure of the Jacobian matrix. If the Jacobian matrix is known to have only a few non-zero elements in each row, then it is possible to estimate several of its columns by a single function evaluation [2]_. To perform such economical computations, two ingredients are required: * structure : array_like or sparse matrix of shape (m, n). A zero element means that the corresponding element of the Jacobian is identically zero. * groups : array_like of shape (n,). A column grouping for a given sparsity structure; use `group_columns` to obtain it. A single array or a sparse matrix is interpreted as a sparsity structure, and groups are computed inside the function. A tuple is interpreted as (structure, groups). If None (default), a standard dense differencing will be used. Note that sparse differencing makes sense only for large Jacobian matrices where each row contains few non-zero elements. as_linear_operator : bool, optional When True, the function returns a `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense array or a sparse matrix depending on `sparsity`. The linear operator provides an efficient way of computing ``J.dot(p)`` for any vector ``p`` of shape (n,), but does not allow direct access to individual elements of the matrix. By default `as_linear_operator` is False. args, kwargs : tuple and dict, optional Additional arguments passed to `fun`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)``. Returns ------- J : {ndarray, sparse matrix, LinearOperator} Finite difference approximation of the Jacobian matrix. If `as_linear_operator` is True, returns a LinearOperator with shape (m, n). Otherwise it returns a dense array or sparse matrix depending on how `sparsity` is defined. If `sparsity` is None, an ndarray with shape (m, n) is returned. If `sparsity` is not None, a csr_matrix with shape (m, n) is returned. For sparse matrices and linear operators the result is always returned as a 2-D structure; for ndarrays, if m=1 it is returned as a 1-D gradient array with shape (n,). See Also -------- check_derivative : Check correctness of a function computing derivatives. Notes ----- If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where EPS is machine epsilon for float64 numbers, s=2 for the '2-point' method and s=3 for the '3-point' method. Such a relative step approximately minimizes the sum of truncation and round-off errors, see [1]_. A finite difference scheme for the '3-point' method is selected automatically. The well-known central difference scheme is used for points sufficiently far from the boundary, and a 3-point forward or backward scheme is used for points near the boundary. Both schemes have second-order accuracy in terms of Taylor expansion. Refer to [3]_ for the formulas of 3-point forward and backward difference schemes. For dense differencing, when m=1 the Jacobian is returned with shape (n,); on the other hand, when n=1 the Jacobian is returned with shape (m, 1). Our motivation is the following: a) It handles the case of gradient computation (m=1) in a conventional way. b) It clearly separates these two different cases. c) In all cases np.atleast_2d can be called to get a 2-D Jacobian with correct dimensions. References ---------- .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific Computing. 3rd edition", sec. 5.7. ..
[2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13 (1974), pp. 117-120. .. [3] B. Fornberg, "Generation of Finite Difference Formulas on Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988. Examples -------- >>> import numpy as np >>> from scipy.optimize import approx_derivative >>> >>> def f(x, c1, c2): ... return np.array([x[0] * np.sin(c1 * x[1]), ... x[0] * np.cos(c2 * x[1])]) ... >>> x0 = np.array([1.0, 0.5 * np.pi]) >>> approx_derivative(f, x0, args=(1, 2)) array([[ 1., 0.], [-1., 0.]]) Bounds can be used to limit the region of function evaluation. In the example below we compute the left and right derivatives at the point 1.0. >>> def g(x): ... return x**2 if x >= 1 else x ... >>> x0 = 1.0 >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0)) array([ 1.]) >>> approx_derivative(g, x0, bounds=(1.0, np.inf)) array([ 2.]) """ if method not in ['2-point', '3-point', 'cs']: raise ValueError("Unknown method '%s'. " % method) x0 = np.atleast_1d(x0) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = _prepare_bounds(bounds, x0) if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if as_linear_operator and not (np.all(np.isinf(lb)) and np.all(np.isinf(ub))): raise ValueError("Bounds not supported when " "`as_linear_operator` is True.") def fun_wrapped(x): f = np.atleast_1d(fun(x, *args, **kwargs)) if f.ndim > 1: raise RuntimeError("`fun` return value has " "more than 1 dimension.") return f if f0 is None: f0 = fun_wrapped(x0) else: f0 = np.atleast_1d(f0) if f0.ndim > 1: raise ValueError("`f0` passed has more than 1 dimension.") if np.any((x0 < lb) | (x0 > ub)): raise ValueError("`x0` violates bound constraints.") if as_linear_operator: if rel_step is None: rel_step = relative_step[method] return _linear_operator_difference(fun_wrapped, x0, f0, rel_step, method) else: h = _compute_absolute_step(rel_step, x0, method) if method == '2-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '1-sided', lb, ub) elif method == '3-point': h, use_one_sided = _adjust_scheme_to_bounds( x0, h, 1, '2-sided', lb, ub) elif method == 'cs': use_one_sided = False if sparsity is None: return _dense_difference(fun_wrapped, x0, f0, h, use_one_sided, method) else: if not issparse(sparsity) and len(sparsity) == 2: structure, groups = sparsity else: structure = sparsity groups = group_columns(sparsity) if issparse(structure): structure = csc_matrix(structure) else: structure = np.atleast_2d(structure) groups = np.atleast_1d(groups) return _sparse_difference(fun_wrapped, x0, f0, h, use_one_sided, structure, groups, method) def _linear_operator_difference(fun, x0, f0, h, method): m = f0.size n = x0.size if method == '2-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p df = fun(x) - f0 return df / dx elif method == '3-point': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = 2*h / norm(p) x1 = x0 - (dx/2)*p x2 = x0 + (dx/2)*p f1 = fun(x1) f2 = fun(x2) df = f2 - f1 return df / dx elif method == 'cs': def matvec(p): if np.array_equal(p, np.zeros_like(p)): return np.zeros(m) dx = h / norm(p) x = x0 + dx*p*1.j f1 = fun(x) df = f1.imag return df / dx else: raise RuntimeError("Never be here.") return LinearOperator((m, n), matvec) def _dense_difference(fun, x0, f0, h, use_one_sided, method): m = f0.size n = x0.size J_transposed = np.empty((n, m))
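# Note: the Jacobian is assembled transposed. Iteration i below perturbs
# variable i and writes df / dx into J_transposed[i], i.e. column i of the
# Jacobian; the final .T restores the conventional (m, n) layout.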
h_vecs = np.diag(h) for i in range(h.size): if method == '2-point': x = x0 + h_vecs[i] dx = x[i] - x0[i] # Recompute dx as exactly representable number. df = fun(x) - f0 elif method == '3-point' and use_one_sided[i]: x1 = x0 + h_vecs[i] x2 = x0 + 2 * h_vecs[i] dx = x2[i] - x0[i] f1 = fun(x1) f2 = fun(x2) df = -3.0 * f0 + 4 * f1 - f2 elif method == '3-point' and not use_one_sided[i]: x1 = x0 - h_vecs[i] x2 = x0 + h_vecs[i] dx = x2[i] - x1[i] f1 = fun(x1) f2 = fun(x2) df = f2 - f1 elif method == 'cs': f1 = fun(x0 + h_vecs[i]*1.j) df = f1.imag dx = h_vecs[i, i] else: raise RuntimeError("Never be here.") J_transposed[i] = df / dx if m == 1: J_transposed = np.ravel(J_transposed) return J_transposed.T def _sparse_difference(fun, x0, f0, h, use_one_sided, structure, groups, method): m = f0.size n = x0.size row_indices = [] col_indices = [] fractions = [] n_groups = np.max(groups) + 1 for group in range(n_groups): # Perturb variables which are in the same group simultaneously. e = np.equal(group, groups) h_vec = h * e if method == '2-point': x = x0 + h_vec dx = x - x0 df = fun(x) - f0 # The result is written to columns which correspond to perturbed # variables. cols, =
np.nonzero(e)
numpy.nonzero
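To make the grouped-perturbation step above concrete, here is a minimal self-contained sketch (plain NumPy only; the tridiagonal structure and the hand-assigned groups are illustrative, not taken from the source). It perturbs one group of columns at a time, as `_sparse_difference` does, and uses `np.nonzero` to recover which Jacobian columns that single evaluation determines.

import numpy as np

# Hypothetical tridiagonal sparsity structure for n = 5 variables.
structure = (np.eye(5, k=-1) + np.eye(5) + np.eye(5, k=1)) != 0
# Hand-picked grouping: columns in one group never share a non-zero row.
groups = np.array([0, 1, 2, 0, 1])

for group in range(groups.max() + 1):
    e = np.equal(group, groups)  # variables perturbed simultaneously
    cols, = np.nonzero(e)        # columns recovered by this evaluation
    # Grouping invariant: each row touches at most one column of the group.
    assert (structure[:, cols].sum(axis=1) <= 1).all()
    print(group, cols)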
from __future__ import division from math import sqrt as sqrt from itertools import product as product import torch import numpy as np import cv2 from lib.utils.visualize_utils import TBWriter def vis(func): """tensorboard visualization if has writer as input""" def wrapper(*args, **kw): return func(*args, **kw) if kw['tb_writer'] is not None else None return wrapper class PriorBoxBase(object): """Compute priorbox coordinates in center-offset form for each source feature map. """ def __init__(self, cfg): super(PriorBoxBase, self).__init__() self.image_size = cfg.MODEL.IMAGE_SIZE self._steps = cfg.MODEL.STEPS self._cfg_list = [] self._prior_cfg = {} self._clip = cfg.MODEL.CLIP self._variance = cfg.MODEL.VARIANCE for v in self._variance: if v <= 0: raise ValueError('Variances must be greater than 0') def _setup(self, cfg): num_feat = len(self._steps) for item in self._cfg_list: if item not in cfg.MODEL: raise Exception("wrong anchor config!") if len(cfg.MODEL[item]) != num_feat and len(cfg.MODEL[item]) != 0: raise Exception("config {} length does not match step length!".format(item)) self._prior_cfg[item] = cfg.MODEL[item] @property def num_priors(self): """allow prior num calculation before knowing feature map size""" assert self._prior_cfg is not {} return [int(len(self._create_prior(0, 0, k)) / 4) for k in range(len(self._steps))] def _create_prior(self, cx, cy, k): raise NotImplementedError @vis def _image_proc(self, image=None, tb_writer=None): # TODO test with image if isinstance(image, type(None)): image = np.ones((self.image_size[1], self.image_size[0], 3)) elif isinstance(image, str): image = cv2.imread(image, -1) image = cv2.resize(image, (self.image_size[1], self.image_size[0])) return image @vis def _prior_vis(self, anchor, image_ori, feat_idx, tb_writer=None): # TODO add output path to the signature writer = tb_writer.writer prior_num = self.num_priors[feat_idx] # transform coordinates scale = [self.image_size[1], self.image_size[0], self.image_size[1], self.image_size[0]] bboxs = np.array(anchor).reshape((-1, 4)) box_centers = bboxs[:, :2] * scale[:2] # [x, y] # bboxs: [xmin, ymin, xmax, ymax] bboxs =
np.hstack((bboxs[:, :2] - bboxs[:, 2:4] / 2, bboxs[:, :2] + bboxs[:, 2:4] / 2))
numpy.hstack
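For reference, the completion above is the usual center-offset to corner-form box conversion. A standalone sketch with made-up prior values (the numbers are illustrative only):

import numpy as np

# Two hypothetical priors in center-offset form [cx, cy, w, h], normalized.
priors = np.array([[0.50, 0.50, 0.20, 0.20],
                   [0.25, 0.75, 0.10, 0.30]])

# Corner form [xmin, ymin, xmax, ymax]: offset the centers by half the extents.
corners = np.hstack((priors[:, :2] - priors[:, 2:4] / 2,
                     priors[:, :2] + priors[:, 2:4] / 2))
print(corners[0])  # -> [0.4 0.4 0.6 0.6]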
from __future__ import absolute_import from __future__ import division from __future__ import print_function import cntk as C import numpy as np from .common import floatx, epsilon, image_dim_ordering, image_data_format from collections import defaultdict from contextlib import contextmanager import warnings C.set_global_option('align_axis', 1) b_any = any dev = C.device.use_default_device() if dev.type() == 0: warnings.warn( 'CNTK backend warning: GPU is not detected. ' 'CNTK\'s CPU version is not fully optimized,' 'please run with GPU to get better performance.') # A learning phase is a bool tensor used to run Keras models in # either train mode (learning_phase == 1) or test mode (learning_phase == 0). # LEARNING_PHASE_PLACEHOLDER is the placeholder for dynamic learning phase _LEARNING_PHASE_PLACEHOLDER = C.constant(shape=(), dtype=np.float32, value=1.0, name='_keras_learning_phase') # static learning phase flag, if it is not 0 or 1, we will go with dynamic learning phase tensor. _LEARNING_PHASE = -1 _UID_PREFIXES = defaultdict(int) # cntk doesn't support gradient as symbolic op, to hook up with keras model, # we will create gradient as a constant placeholder, here use this global # map to keep the mapping from grad placeholder to parameter grad_parameter_dict = {} NAME_SCOPE_STACK = [] @contextmanager def name_scope(name): global NAME_SCOPE_STACK NAME_SCOPE_STACK.append(name) yield NAME_SCOPE_STACK.pop() def get_uid(prefix=''): _UID_PREFIXES[prefix] += 1 return _UID_PREFIXES[prefix] def learning_phase(): # If _LEARNING_PHASE is not 0 or 1, return dynamic learning phase tensor return _LEARNING_PHASE if _LEARNING_PHASE in {0, 1} else _LEARNING_PHASE_PLACEHOLDER def set_learning_phase(value): global _LEARNING_PHASE if value not in {0, 1}: raise ValueError('CNTK Backend: Set learning phase ' 'with value %s is not supported, ' 'expected 0 or 1.' % value) _LEARNING_PHASE = value def clear_session(): """Reset learning phase flag for cntk backend. """ global _LEARNING_PHASE global _LEARNING_PHASE_PLACEHOLDER _LEARNING_PHASE = -1 _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(1.0) def in_train_phase(x, alt, training=None): global _LEARNING_PHASE if training is None: training = learning_phase() uses_learning_phase = True else: uses_learning_phase = False # CNTK currently don't support cond op, so here we use # element_select approach as workaround. It may have # perf issue, will resolve it later with cntk cond op. if callable(x) and isinstance(x, C.cntk_py.Function) is False: x = x() if callable(alt) and isinstance(alt, C.cntk_py.Function) is False: alt = alt() if training is True: x._uses_learning_phase = uses_learning_phase return x else: # if _LEARNING_PHASE is static if isinstance(training, int) or isinstance(training, bool): result = x if training == 1 or training is True else alt else: result = C.element_select(training, x, alt) result._uses_learning_phase = uses_learning_phase return result def in_test_phase(x, alt, training=None): return in_train_phase(alt, x, training=training) def _convert_string_dtype(dtype): # cntk only support float32 and float64 if dtype == 'float32': return np.float32 elif dtype == 'float64': return np.float64 else: # cntk only running with float, # try to cast to float to run the model return np.float32 def _convert_dtype_string(dtype): if dtype == np.float32: return 'float32' elif dtype == np.float64: return 'float64' else: raise ValueError('CNTK Backend: Unsupported dtype: %s. ' 'CNTK only supports float32 and ' 'float64.' 
% dtype) def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). """ if dtype is None: dtype = floatx() if name is None: name = '' if isinstance( value, C.variables.Constant) or isinstance( value, C.variables.Parameter): value = value.value # we don't support init parameter with symbolic op, so eval it first as # workaround if isinstance(value, C.cntk_py.Function): value = eval(value) shape = value.shape if hasattr(value, 'shape') else () if hasattr(value, 'dtype') and value.dtype != dtype and len(shape) > 0: value = value.astype(dtype) # TODO: remove the conversion when cntk supports int32, int64 # https://docs.microsoft.com/en-us/python/api/cntk.variables.parameter dtype = 'float32' if 'int' in str(dtype) else dtype v = C.parameter(shape=shape, init=value, dtype=dtype, name=_prepare_name(name, 'variable')) v._keras_shape = v.shape v._uses_learning_phase = False v.constraint = constraint return v def bias_add(x, bias, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) dims = len(x.shape) if dims > 0 and x.shape[0] == C.InferredDimension: dims -= 1 bias_dims = len(bias.shape) if bias_dims != 1 and bias_dims != dims: raise ValueError('Unexpected bias dimensions %d, ' 'expected 1 or %d dimensions' % (bias_dims, dims)) if dims == 4: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1, 1) else: shape = (bias.shape[3],) + bias.shape[:3] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 3: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1, 1) else: shape = (bias.shape[2],) + bias.shape[:2] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, 1, bias.shape[0]) else: shape = bias.shape elif dims == 2: if data_format == 'channels_first': if bias_dims == 1: shape = (bias.shape[0], 1) else: shape = (bias.shape[1],) + bias.shape[:1] elif data_format == 'channels_last': if bias_dims == 1: shape = (1, bias.shape[0]) else: shape = bias.shape else: shape = bias.shape return x + reshape(bias, shape) def eval(x): if isinstance(x, C.cntk_py.Function): return x.eval() elif isinstance(x, C.variables.Constant) or isinstance(x, C.variables.Parameter): return x.value else: raise ValueError('CNTK Backend: `eval` method on ' '`%s` type is not supported. ' 'CNTK only supports `eval` with ' '`Function`, `Constant` or ' '`Parameter`.' % type(x)) def placeholder( shape=None, ndim=None, dtype=None, sparse=False, name=None, dynamic_axis_num=1): if dtype is None: dtype = floatx() if not shape: if ndim: shape = tuple([None for _ in range(ndim)]) dynamic_dimension = C.FreeDimension if _get_cntk_version() >= 2.2 else C.InferredDimension cntk_shape = [dynamic_dimension if s is None else s for s in shape] cntk_shape = tuple(cntk_shape) if dynamic_axis_num > len(cntk_shape): raise ValueError('CNTK backend: creating placeholder with ' '%d dimension is not supported, at least ' '%d dimensions are needed.' 
% (len(cntk_shape), dynamic_axis_num)) if name is None: name = '' cntk_shape = cntk_shape[dynamic_axis_num:] x = C.input( shape=cntk_shape, dtype=_convert_string_dtype(dtype), is_sparse=sparse, name=name) x._keras_shape = shape x._uses_learning_phase = False x._cntk_placeholder = True return x def is_placeholder(x): """Returns whether `x` is a placeholder. # Arguments x: A candidate placeholder. # Returns Boolean. """ return hasattr(x, '_cntk_placeholder') and x._cntk_placeholder def is_keras_tensor(x): if not is_tensor(x): raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. ' 'Expected a symbolic tensor instance.') return hasattr(x, '_keras_history') def is_tensor(x): return isinstance(x, (C.variables.Constant, C.variables.Variable, C.variables.Parameter, C.ops.functions.Function)) def shape(x): shape = list(int_shape(x)) num_dynamic = _get_dynamic_axis_num(x) non_dyn_shape = [] for i in range(len(x.shape)): if shape[i + num_dynamic] is None: non_dyn_shape.append(x.shape[i]) else: non_dyn_shape.append(shape[i + num_dynamic]) return shape[:num_dynamic] + non_dyn_shape def is_sparse(tensor): return tensor.is_sparse def int_shape(x): if hasattr(x, '_keras_shape'): return x._keras_shape shape = x.shape if hasattr(x, 'dynamic_axes'): dynamic_shape = [None for a in x.dynamic_axes] shape = tuple(dynamic_shape) + shape return shape def ndim(x): shape = int_shape(x) return len(shape) def _prepare_name(name, default): prefix = '_'.join(NAME_SCOPE_STACK) if name is None or name == '': return prefix + '/' + default return prefix + '/' + name def constant(value, dtype=None, shape=None, name=None): if dtype is None: dtype = floatx() if shape is None: shape = () np_value = value * np.ones(shape) const = C.constant(np_value, dtype=dtype, name=_prepare_name(name, 'constant')) const._keras_shape = const.shape const._uses_learning_phase = False return const def random_binomial(shape, p=0.0, dtype=None, seed=None): # use numpy workaround now if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) np.random.seed(seed) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) size = 1 for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') size *= _ binomial = np.random.binomial(1, p, size).astype(dtype).reshape(shape) return variable(value=binomial, dtype=dtype) def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. 
' 'Please provide fixed dimension ' 'instead of `None`.') return random_uniform_variable(shape, minval, maxval, dtype, seed) def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e3) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' scale = (high - low) / 2 p = C.parameter( shape, init=C.initializer.uniform( scale, seed=seed), dtype=dtype, name=name) return variable(value=p.value + low + scale) def random_normal_variable( shape, mean, scale, dtype=None, name=None, seed=None): if dtype is None: dtype = floatx() if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e7) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) if name is None: name = '' return C.parameter( shape=shape, init=C.initializer.normal( scale=scale, seed=seed), dtype=dtype, name=name) def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if dtype is None: dtype = floatx() for _ in shape: if _ is None: raise ValueError('CNTK Backend: randomness op with ' 'dynamic shape is not supported now. ' 'Please provide fixed dimension ' 'instead of `None`.') # how to apply mean and stddev return random_normal_variable(shape=shape, mean=mean, scale=1.0, seed=seed) def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): if seed is None: seed = np.random.randint(1, 10e6) if dtype is None: dtype = np.float32 else: dtype = _convert_string_dtype(dtype) return C.parameter( shape, init=C.initializer.truncated_normal( stddev, seed=seed), dtype=dtype) def dtype(x): return _convert_dtype_string(x.dtype) def zeros(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.zeros(shape, ctype), dtype=dtype, name=name) def ones(shape, dtype=None, name=None): if dtype is None: dtype = floatx() ctype = _convert_string_dtype(dtype) return variable(value=np.ones(shape, ctype), dtype=dtype, name=name) def eye(size, dtype=None, name=None): if dtype is None: dtype = floatx() return variable(np.eye(size), dtype, name) def zeros_like(x, dtype=None, name=None): return x * 0 def ones_like(x, dtype=None, name=None): return zeros_like(x) + 1 def count_params(x): for _ in x.shape: if _ == C.InferredDimension or _ == C.FreeDimension: raise ValueError('CNTK backend: `count_params` with dynamic ' 'shape is not supported. Please provide ' 'fixed dimension instead of `None`.') return np.prod(int_shape(x)) def cast(x, dtype): # cntk calculate everything in float, so don't need case from bool / int return x def dot(x, y): if len(x.shape) > 2 or len(y.shape) > 2: y_shape = int_shape(y) if len(y_shape) > 2: permutation = [len(y_shape) - 2] permutation += list(range(len(y_shape) - 2)) permutation += [len(y_shape) - 1] y = C.transpose(y, perm=permutation) return C.times(x, y, len(y_shape) - 1) else: return C.times(x, y) def batch_dot(x, y, axes=None): x_shape = int_shape(x) y_shape = int_shape(y) if isinstance(axes, int): axes = (axes, axes) if axes is None: # behaves like tf.batch_matmul as default axes = [len(x_shape) - 1, len(y_shape) - 2] if b_any([isinstance(a, (list, tuple)) for a in axes]): raise ValueError('Multiple target dimensions are not supported. 
' + 'Expected: None, int, (int, int), ' + 'Provided: ' + str(axes)) if len(x_shape) == 2 and len(y_shape) == 2: if axes[0] == axes[1]: result = sum(x * y, axis=axes[0], keepdims=True) return result if axes[0] == 1 else transpose(result) else: return sum(x * transpose(y), axis=axes[0], keepdims=True) else: if len(y_shape) == 2: y = expand_dims(y) normalized_axis = [] normalized_axis.append(_normalize_axis(axes[0], x)[0]) normalized_axis.append(_normalize_axis(axes[1], y)[0]) # transpose i = normalized_axis[0] while i < len(x.shape) - 1: x = C.swapaxes(x, i, i + 1) i += 1 i = normalized_axis[1] while i > 0: y = C.swapaxes(y, i, i - 1) i -= 1 result = C.times(x, y, output_rank=(len(y.shape) - 1) if len(y.shape) > 1 else 1) if len(y_shape) == 2: result = squeeze(result, -1) return result def transpose(x): return C.swapaxes(x, 0, 1) def gather(reference, indices): # There is a bug in cntk gather op which may cause crash. # We have made a fix but not catched in CNTK 2.1 release. # Will update with gather op in next release if _get_cntk_version() >= 2.2: return C.ops.gather(reference, indices) else: num_classes = reference.shape[0] one_hot_matrix = C.ops.one_hot(indices, num_classes) return C.times(one_hot_matrix, reference, output_rank=len(reference.shape) - 1) def _remove_dims(x, axis, keepdims=False): if keepdims is False and isinstance(axis, list): # sequence axis is removed by default, so don't need reshape on it reduce_axes = [] for a in axis: if isinstance(a, C.Axis) is False: reduce_axes.append(a) return _reshape_dummy_dim(x, reduce_axes) else: if isinstance(axis, list): has_seq = False for a in axis: if isinstance(a, C.Axis): has_seq = True break if has_seq: nones = _get_dynamic_axis_num(x) x = expand_dims(x, nones) return x def max(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_max') return _remove_dims(output, axis, keepdims) def min(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_min') return _remove_dims(output, axis, keepdims) def sum(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_sum') return _remove_dims(output, axis, keepdims) def prod(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_prod') return _remove_dims(output, axis, keepdims) def logsumexp(x, axis=None, keepdims=False): return log(sum(exp(x), axis=axis, keepdims=keepdims)) def var(x, axis=None, keepdims=False): m = mean(x, axis, keepdims=True) devs_squared = C.square(x - m) return mean(devs_squared, axis=axis, keepdims=keepdims) def std(x, axis=None, keepdims=False): return C.sqrt(var(x, axis=axis, keepdims=keepdims)) def expand_dims(x, axis=-1): shape = list(int_shape(x)) nones = _get_dynamic_axis_num(x) index = axis if axis >= 0 else len(shape) + 1 shape.insert(index, 1) new_shape = shape[nones:] new_shape = tuple( [C.InferredDimension if _ is None else _ for _ in new_shape]) result = C.reshape(x, new_shape) if index < nones: result._keras_shape = shape return result def squeeze(x, axis): if isinstance(axis, tuple): axis = list(axis) if not isinstance(axis, list): axis = [axis] shape = list(int_shape(x)) _axis = [] for _ in axis: if isinstance(_, int): _axis.append(_ if _ >= 0 else _ + len(shape)) if len(_axis) == 0: return x nones = _get_dynamic_axis_num(x) for _ in sorted(_axis, reverse=True): del shape[_] new_shape = shape[nones:] new_shape = tuple([C.InferredDimension if _ == C.FreeDimension 
else _ for _ in new_shape]) return C.reshape(x, new_shape) def tile(x, n): if isinstance(n, int): n = (n,) elif isinstance(n, list): n = tuple(n) shape = int_shape(x) num_dynamic_axis = _get_dynamic_axis_num(x) # Padding the axis if len(n) < len(shape): n = tuple([1 for _ in range(len(shape) - len(n))]) + n if len(n) != len(shape): raise NotImplementedError i = num_dynamic_axis for i, rep in enumerate(n): if i >= num_dynamic_axis and shape[i] is not None: tmp = [x] * rep x = C.splice(*tmp, axis=i - num_dynamic_axis) i += 1 return x def _normalize_axis(axis, x): shape = int_shape(x) ndim = len(shape) nones = _get_dynamic_axis_num(x) if nones > ndim: raise ValueError('CNTK Backend: tensor with keras shape: `%s` has ' '%d cntk dynamic axis, this is not expected, please ' 'double check the keras shape history.' % (str(shape), nones)) # Current cntk does not support shape like (1, batch). so using the workaround # here to mapping the correct axis. Will remove this tricky after we add support # in native cntk op cntk_axis = [] dynamic_axis_index = 0 for i in range(ndim): if shape[i] is None and dynamic_axis_index < nones: cntk_axis.append(x.dynamic_axes[dynamic_axis_index]) dynamic_axis_index += 1 else: cntk_axis.append(i - dynamic_axis_index) if dynamic_axis_index < nones: i = 0 while dynamic_axis_index < nones: cntk_axis[i] = x.dynamic_axes[dynamic_axis_index] i += 1 dynamic_axis_index += 1 while i < len(cntk_axis): cntk_axis[i] -= nones i += 1 if isinstance(axis, tuple): _axis = list(axis) elif isinstance(axis, int): _axis = [axis] elif isinstance(axis, list): _axis = list(axis) else: _axis = axis if isinstance(_axis, list): for i, a in enumerate(_axis): if a is not None and a < 0: _axis[i] = (a % ndim) if _axis[i] is not None: _axis[i] = cntk_axis[_axis[i]] else: if _axis is None: _axis = C.Axis.all_axes() return _axis def _reshape_dummy_dim(x, axis): shape = list(x.shape) _axis = [_ + len(shape) if _ < 0 else _ for _ in axis] if shape.count(C.InferredDimension) > 1 or shape.count(C.FreeDimension) > 1: result = x for index in sorted(_axis, reverse=True): result = C.reshape(result, shape=(), begin_axis=index, end_axis=index + 1) return result else: for index in sorted(_axis, reverse=True): del shape[index] shape = [C.InferredDimension if _ == C.FreeDimension else _ for _ in shape] return C.reshape(x, shape) def mean(x, axis=None, keepdims=False): axis = _normalize_axis(axis, x) output = _reduce_on_axis(x, axis, 'reduce_mean') return _remove_dims(output, axis, keepdims) def any(x, axis=None, keepdims=False): reduce_result = sum(x, axis, keepdims=keepdims) any_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(any_matrix) else: return any_matrix def all(x, axis=None, keepdims=False): reduce_result = prod(x, axis, keepdims=keepdims) all_matrix = C.element_select( reduce_result, ones_like(reduce_result), zeros_like(reduce_result)) if len(reduce_result.shape) == 0 and _get_dynamic_axis_num(x) == 0: return C.reduce_sum(all_matrix) else: return all_matrix def classification_error(target, output, axis=-1): return C.ops.reduce_mean( C.equal( argmax( output, axis=-1), argmax( target, axis=-1)), axis=C.Axis.all_axes()) def argmax(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmax(x, axis=axis[0]) return _reshape_dummy_dim(output, axis) def argmin(x, axis=-1): axis = [axis] axis = _normalize_axis(axis, x) output = C.ops.argmin(x, axis=axis[0]) 
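# cntk's argmin/argmax are reductions that keep the reduced axis as a
# singleton dimension; _reshape_dummy_dim drops it so the result matches
# the squeezed shape Keras expects.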
return _reshape_dummy_dim(output, axis) def square(x): return C.square(x) def abs(x): return C.abs(x) def sqrt(x): return C.sqrt(x) def exp(x): return C.exp(x) def log(x): return C.log(x) def round(x): return C.round(x) def sigmoid(x): return C.sigmoid(x) def sign(x): return x / C.abs(x) def pow(x, a): return C.pow(x, a) def clip(x, min_value, max_value): if max_value is not None and max_value < min_value: max_value = min_value if max_value is None: max_value = np.inf if min_value is None: min_value = -np.inf return C.clip(x, min_value, max_value) def binary_crossentropy(target, output, from_logits=False): if from_logits: output = C.sigmoid(output) output = C.clip(output, epsilon(), 1.0 - epsilon()) output = -target * C.log(output) - (1.0 - target) * C.log(1.0 - output) return output def get_variable_shape(x): return int_shape(x) def update(x, new_x): return C.assign(x, new_x) def moving_average_update(variable, value, momentum): return C.assign(variable, variable * momentum + value * (1. - momentum)) def update_add(x, increment): result = x + increment return C.assign(x, result) def gradients(loss, variables): # cntk does not support gradients as symbolic op, # to hook up with keras model # we will return a constant as place holder, the cntk learner will apply # the gradient during training. global grad_parameter_dict if isinstance(variables, list) is False: variables = [variables] grads = [] for v in variables: g = C.constant(0, shape=v.shape, name='keras_grad_placeholder') grads.append(g) grad_parameter_dict[g] = v return grads def equal(x, y): return C.equal(x, y) def not_equal(x, y): return C.not_equal(x, y) def greater(x, y): return C.greater(x, y) def greater_equal(x, y): return C.greater_equal(x, y) def less(x, y): return C.less(x, y) def less_equal(x, y): return C.less_equal(x, y) def maximum(x, y): return C.element_max(x, y) def minimum(x, y): return C.element_min(x, y) def sin(x): return C.sin(x) def cos(x): return C.cos(x) def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): if gamma is None: if beta is None: gamma = ones_like(x) else: gamma = ones_like(beta) if beta is None: if gamma is None: beta = zeros_like(x) else: beta = zeros_like(gamma) mean, variant = _moments(x, _normalize_axis(reduction_axes, x)) if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: normalized = batch_normalization( x, mean, variant, beta, gamma, epsilon) else: # need broadcasting target_shape = [] x_shape = int_shape(x) # skip the batch axis for axis in range(1, ndim(x)): if axis in reduction_axes: target_shape.append(1) if ndim(gamma) > axis: gamma = C.reduce_mean(gamma, axis - 1) beta = C.reduce_mean(beta, axis - 1) else: target_shape.append(x_shape[axis]) broadcast_mean = C.reshape(mean, target_shape) broadcast_var = C.reshape(variant, target_shape) broadcast_gamma = C.reshape(gamma, target_shape) broadcast_beta = C.reshape(beta, target_shape) normalized = batch_normalization( x, broadcast_mean, broadcast_var, broadcast_beta, broadcast_gamma, epsilon) return normalized, mean, variant def _moments(x, axes=None, shift=None, keep_dims=False): _axes = tuple(axes) if shift is None: shift = x # Compute true mean while keeping the dims for proper broadcasting. 
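# The gradient-stopped shift keeps the moment computation numerically
# stable: Var(x) = E[(x - shift)^2] - (E[x - shift])^2, and the true mean
# is recovered below as E[x - shift] + shift.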
for axis in _axes: shift = C.reduce_mean(shift, axis=axis) shift = C.stop_gradient(shift) shifted_mean = C.minus(x, shift) for axis in _axes: shifted_mean = C.reduce_mean(shifted_mean, axis=axis) variance_mean = C.square(C.minus(x, shift)) for axis in _axes: variance_mean = C.reduce_mean(variance_mean, axis=axis) variance = C.minus(variance_mean, C.square(shifted_mean)) mean = C.plus(shifted_mean, shift) if not keep_dims: mean = squeeze(mean, _axes) variance = squeeze(variance, _axes) return mean, variance def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3): # The mean / var / beta / gamma may be processed by broadcast # so it may have an extra batch axis with 1, it is not needed # in cntk, need to remove those dummy axis. if ndim(mean) == ndim(x) and shape(mean)[0] == 1: mean = _reshape_dummy_dim(mean, [0]) if ndim(var) == ndim(x) and shape(var)[0] == 1: var = _reshape_dummy_dim(var, [0]) if gamma is None: gamma = ones_like(var) elif ndim(gamma) == ndim(x) and shape(gamma)[0] == 1: gamma = _reshape_dummy_dim(gamma, [0]) if beta is None: beta = zeros_like(mean) elif ndim(beta) == ndim(x) and shape(beta)[0] == 1: beta = _reshape_dummy_dim(beta, [0]) return (x - mean) / (C.sqrt(var) + epsilon) * gamma + beta def concatenate(tensors, axis=-1): if len(tensors) == 0: return None axis = [axis] axis = _normalize_axis(axis, tensors[0]) return C.splice(*tensors, axis=axis[0]) def flatten(x): return reshape(x, (-1,)) def reshape(x, shape): shape = tuple([C.InferredDimension if _ == C.FreeDimension else _ for _ in shape]) if isinstance(x, C.variables.Parameter): return C.reshape(x, shape) else: num_dynamic_axis = _get_dynamic_axis_num(x) if num_dynamic_axis == 1 and len(shape) > 0 and shape[0] == -1: # collapse axis with batch axis if b_any(_ == C.InferredDimension for _ in x.shape) or b_any( _ == C.FreeDimension for _ in x.shape): warnings.warn( 'Warning: CNTK backend does not support ' 'collapse of batch axis with inferred dimension. ' 'The reshape did not take place.') return x return _reshape_batch(x, shape) else: # no collapse, then first need to padding the shape if num_dynamic_axis >= len(shape): i = 0 while i < len(shape): if shape[i] is None or shape[i] == -1: i += 1 else: break shape = tuple([-1 for _ in range(num_dynamic_axis - i)]) + shape new_shape = list(shape) new_shape = new_shape[num_dynamic_axis:] new_shape = [C.InferredDimension if _ is None else _ for _ in new_shape] return C.reshape(x, new_shape) def permute_dimensions(x, pattern): dims = len(int_shape(x)) num_dynamic_axis = _get_dynamic_axis_num(x) if isinstance(pattern, list): current_layout = [i for i in range(dims)] else: current_layout = tuple([i for i in range(dims)]) if num_dynamic_axis > 0 and pattern[:num_dynamic_axis] != current_layout[:num_dynamic_axis]: raise ValueError('CNTK backend: the permute pattern %s ' 'requested permute on dynamic axis, ' 'which is not supported. Please do permute ' 'on static axis.' 
% pattern) axis = list(pattern) axis = axis[num_dynamic_axis:] axis = _normalize_axis(axis, x) return C.transpose(x, axis) def resize_images(x, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output elif data_format == 'channels_last': output = repeat_elements(x, height_factor, axis=1) output = repeat_elements(output, width_factor, axis=2) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('CNTK Backend: Invalid data_format:', data_format) def repeat_elements(x, rep, axis): axis = _normalize_axis(axis, x) axis = axis[0] slices = [] shape = x.shape i = 0 while i < shape[axis]: tmp = C.ops.slice(x, axis, i, i + 1) for _ in range(rep): slices.append(tmp) i += 1 return C.splice(*slices, axis=axis) def repeat(x, n): # this is a workaround for recurrent layer # if n is inferred dimension, # we can't figure out how to repeat it in cntk now # return the same x to take cntk broadcast feature # to make the recurrent layer work. # need to be fixed in GA. if n is C.InferredDimension or n is C.FreeDimension: return x index = 1 - _get_dynamic_axis_num(x) if index < 0 or index > 1: raise NotImplementedError new_shape = list(x.shape) new_shape.insert(index, 1) new_shape = tuple(new_shape) x = C.reshape(x, new_shape) temp = [x] * n return C.splice(*temp, axis=index) def tanh(x): return C.tanh(x) def _static_rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) uses_learning_phase = False if dims < 3: raise ValueError('Input should be at least 3D.') # if the second axis is static axis, CNTK will do unroll by default if shape[1] is None: raise ValueError('CNTK Backend: the input of static rnn ' 'has shape `%s`, the second axis ' 'is not static. If you want to run ' 'rnn with non-static axis, please try ' 'dynamic rnn with sequence axis.' 
% shape) if constants is None: constants = [] if mask is not None: mask_shape = int_shape(mask) if len(mask_shape) == dims - 1: mask = expand_dims(mask) nones = _get_dynamic_axis_num(inputs) states = tuple(initial_states) outputs = [] time_axis = 1 - nones if nones > 0 else 1 if go_backwards: i = shape[1] - 1 while i >= 0: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, time_axis) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, time_axis) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states i -= 1 else: i = 0 while i < shape[1]: current = C.ops.slice(inputs, time_axis, i, i + 1) # remove dummy dimension current = squeeze(current, 1) output, new_states = step_function( current, tuple(states) + tuple(constants)) if getattr(output, '_uses_learning_phase', False): uses_learning_phase = True if mask is not None: mask_slice = C.ops.slice(mask, time_axis, i, i + 1) mask_slice = squeeze(mask_slice, 1) if len(outputs) == 0: prev_output = zeros_like(output) else: prev_output = outputs[-1] output = C.ops.element_select(mask_slice, output, prev_output) return_states = [] for s, n_s in zip(states, new_states): return_states.append( C.ops.element_select( mask_slice, n_s, s)) new_states = return_states outputs.append(output) states = new_states[:len(states)] i += 1 i = 1 # add the time_step axis back final_output = expand_dims(outputs[0], 1) last_output = outputs[0] while i < len(outputs): # add the time_step axis back output_slice = expand_dims(outputs[i], 1) final_output = C.splice(final_output, output_slice, axis=time_axis) last_output = outputs[i] i += 1 last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, states def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None): shape = int_shape(inputs) dims = len(shape) global uses_learning_phase uses_learning_phase = False if dims < 3: raise ValueError('CNTK Backend: the input of rnn has only rank %d ' 'Need at least rank 3 to run RNN.' % dims) if _get_dynamic_axis_num(inputs) == 0 or unroll: return _static_rnn( step_function, inputs, initial_states, go_backwards, mask, constants, unroll, input_length) if constants is None: constants = [] num_time_step = shape[1] if num_time_step is None and not has_seq_axis(inputs): num_time_step = inputs.shape[0] initial = [] for s in initial_states: if _get_dynamic_axis_num(s) == 0: if hasattr(C, 'to_batch'): initial.append(C.to_batch(s)) else: initial.append(C.user_function(ConvertToBatch(s))) else: initial.append(s) need_convert = not has_seq_axis(inputs) if go_backwards and need_convert is False: raise NotImplementedError('CNTK Backend: `go_backwards` is not supported with ' 'variable-length sequences. 
Please specify a ' 'static length for your sequences.') rnn_inputs = inputs if need_convert: if go_backwards: rnn_inputs = reverse(rnn_inputs, 1) rnn_inputs = C.to_sequence(rnn_inputs) rnn_constants = [] for constant in constants: if isinstance(constant, list): new_c = [] for c in constant: if _get_dynamic_axis_num(c) == 1: new_c.append(C.sequence.broadcast_as(c, rnn_inputs)) else: new_c.append(c) rnn_constants.append(new_c) else: if _get_dynamic_axis_num(constant) == 1: rnn_constants.append(C.sequence.broadcast_as(constant, rnn_inputs)) else: rnn_constants.append(constant) else: rnn_constants = constants if mask is not None and not has_seq_axis(mask): if go_backwards: mask = reverse(mask, 1) if len(int_shape(mask)) == 2: mask = expand_dims(mask) mask = C.to_sequence_like(mask, rnn_inputs) states = tuple(initial) with C.default_options(axis_offset=1): def _recurrence(x, states, m): # create place holder place_holders = [C.placeholder(dynamic_axes=x.dynamic_axes) for _ in states] past_values = [] for s, p in zip(states, place_holders): past_values.append(C.sequence.past_value(p, s)) new_output, new_states = step_function( x, tuple(past_values) + tuple(rnn_constants)) if getattr(new_output, '_uses_learning_phase', False): global uses_learning_phase uses_learning_phase = True if m is not None: new_states = [C.element_select(m, n, s) for n, s in zip(new_states, past_values)] n_s = [] for o, p in zip(new_states, place_holders): n_s.append(o.replace_placeholders({p: o.output})) if len(n_s) > 0: new_output = n_s[0] return new_output, n_s final_output, final_states = _recurrence(rnn_inputs, states, mask) last_output = C.sequence.last(final_output) last_states = [C.sequence.last(s) for s in final_states] if need_convert: final_output = C.sequence.unpack(final_output, 0, no_mask_output=True) if num_time_step is not None and num_time_step is not C.FreeDimension: final_output = _reshape_sequence(final_output, num_time_step) f_stats = [] for l_s, i_s in zip(last_states, initial_states): if _get_dynamic_axis_num(i_s) == 0 and _get_dynamic_axis_num(l_s) == 1: if hasattr(C, 'unpack_batch'): f_stats.append(C.unpack_batch(l_s)) else: f_stats.append(C.user_function(ConvertToStatic(l_s, batch_size=i_s.shape[0]))) else: f_stats.append(l_s) last_output._uses_learning_phase = uses_learning_phase return last_output, final_output, f_stats def has_seq_axis(x): return hasattr(x, 'dynamic_axes') and len(x.dynamic_axes) > 1 def l2_normalize(x, axis=None): axis = [axis] axis = _normalize_axis(axis, x) norm = C.sqrt(C.reduce_sum(C.square(x), axis=axis[0])) return x / norm def hard_sigmoid(x): x = (0.2 * x) + 0.5 x = C.clip(x, 0.0, 1.0) return x def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) if padding == 'causal': # causal (dilated) convolution: left_pad = dilation_rate * (kernel.shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) kernel = C.swapaxes(kernel, 0, 2) padding = _preprocess_border_mode(padding) strides = [strides] x = C.convolution( kernel, x, strides=tuple(strides), auto_padding=[ False, padding]) if data_format == 'channels_last': x = C.swapaxes(x, 0, 1) return x def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if 
data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding]) else: assert dilation_rate[0] == dilation_rate[1] assert strides == (1, 1), 'Invalid strides for dilated convolution' x = C.convolution( kernel, x, strides=dilation_rate[0], auto_padding=[ False, padding, padding]) return _postprocess_conv2d_output(x, data_format) def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): raise NotImplementedError def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) pointwise_kernel = _preprocess_conv2d_kernel(pointwise_kernel, data_format) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding]) x = C.convolution(pointwise_kernel, x, strides=(1, 1, 1), auto_padding=[False]) return _postprocess_conv2d_output(x, data_format) def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) depthwise_kernel = _preprocess_conv2d_kernel(depthwise_kernel, data_format) depthwise_kernel = C.reshape(C.transpose(depthwise_kernel, (1, 0, 2, 3)), (-1, 1) + depthwise_kernel.shape[2:]) padding = _preprocess_border_mode(padding) if dilation_rate == (1, 1): strides = (1,) + strides x = C.convolution(depthwise_kernel, x, strides=strides, auto_padding=[False, padding, padding], groups=x.shape[0]) else: if dilation_rate[0] != dilation_rate[1]: raise ValueError('CNTK Backend: non-square dilation_rate is ' 'not supported.') if strides != (1, 1): raise ValueError('Invalid strides for dilated convolution') x = C.convolution(depthwise_kernel, x, strides=dilation_rate[0], auto_padding=[False, padding, padding], groups=x.shape[0]) return _postprocess_conv2d_output(x, data_format) def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1)): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + 
str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = strides + (strides[0],) x = C.convolution( kernel, x, strides, auto_padding=[ False, padding, padding, padding]) return _postprocess_conv3d_output(x, data_format) def conv3d_transpose(x, kernel, output_shape, strides=(1, 1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv3d_input(x, data_format) kernel = _preprocess_conv3d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[3] shape[1] = output_shape[0] shape[2] = output_shape[1] shape[3] = output_shape[2] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding, padding], output_shape=output_shape) return _postprocess_conv3d_output(x, data_format) def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) strides = strides pool_size = pool_size x = _preprocess_conv2d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv2d_output(x, data_format) def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) padding = _preprocess_border_mode(padding) x = _preprocess_conv3d_input(x, data_format) if pool_mode == 'max': x = C.pooling( x, C.MAX_POOLING, pool_size, strides, auto_padding=[padding]) elif pool_mode == 'avg': x = C.pooling( x, C.AVG_POOLING, pool_size, strides, auto_padding=[padding]) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) return _postprocess_conv3d_output(x, data_format) def relu(x, alpha=0., max_value=None): if alpha != 0.: negative_part = C.relu(-x) x = C.relu(x) if max_value is not None: x = C.clip(x, 0.0, max_value) if alpha != 0.: x -= alpha * negative_part return x def dropout(x, level, noise_shape=None, seed=None): if level < 0. or level >= 1: raise ValueError('CNTK Backend: Invalid dropout level %s, ' 'must be in interval [0, 1].' 
% level) return C.dropout(x, level) def batch_flatten(x): # cntk's batch axis is not in shape, # so just flatten all the dim in x.shape dim = np.prod(x.shape) x = C.reshape(x, (-1,)) x._keras_shape = (None, dim) return x def softmax(x, axis=-1): return C.softmax(x, axis=axis) def softplus(x): return C.softplus(x) def softsign(x): return x / (1 + C.abs(x)) def categorical_crossentropy(target, output, from_logits=False): if from_logits: result = C.cross_entropy_with_softmax(output, target) # cntk's result shape is (batch, 1), while keras expect (batch, ) return C.reshape(result, ()) else: # scale preds so that the class probas of each sample sum to 1 output /= C.reduce_sum(output, axis=-1) # avoid numerical instability with epsilon clipping output = C.clip(output, epsilon(), 1.0 - epsilon()) return -sum(target * C.log(output), axis=-1) def sparse_categorical_crossentropy(target, output, from_logits=False): target = C.one_hot(target, output.shape[-1]) target = C.reshape(target, output.shape) return categorical_crossentropy(target, output, from_logits) class Function(object): def __init__(self, inputs, outputs, updates=[], **kwargs): self.placeholders = inputs self.trainer = None self.unrelated_updates = None self.updates = updates if len(updates) > 0: assert len(outputs) > 0 self.loss = outputs[0] # need group update by gradient place holder u_ops = [] unrelated_updates = [] for update in updates: if isinstance(update, tuple): if len(update) != 2: raise NotImplementedError else: u = C.assign(update[0], update[1]) else: u = update if len(u.arguments) == 0: u_ops.append(u) else: unrelated_updates.append(u) update_func = C.combine([u.output for u in u_ops]) grads = update_func.find_all_with_name('keras_grad_placeholder') u_list = [] p_list = [] for g in grads: if g in grad_parameter_dict: p_list.append(grad_parameter_dict[g]) u_list.append(g) else: raise ValueError( 'CNTK backend: when constructing trainer, ' 'found gradient node `%s` which is not ' 'related to any parameters in the model. ' 'Please double check how the gradient node ' 'is constructed.' 
% g) if len(u_list) > 0: learner = C.cntk_py.universal_learner(p_list, u_list, update_func) criterion = ( outputs[0], outputs[1]) if len(outputs) > 1 else ( outputs[0], ) self.trainer = C.trainer.Trainer( outputs[0], criterion, [learner]) self.trainer_output = tuple([f.output for f in criterion]) elif len(u_ops) > 0: unrelated_updates.extend(u_ops) if len(unrelated_updates) > 0: self.unrelated_updates = C.combine([_.output for _ in unrelated_updates]) if self.trainer is None: self.metrics_outputs = [f.output for f in outputs] self.metrics_func = C.combine(self.metrics_outputs) # cntk only could handle loss and 1 metric in trainer, for metrics more # than 2, need manual eval elif len(outputs) > 2: self.metrics_outputs = [f.output for f in outputs[2:]] self.metrics_func = C.combine(self.metrics_outputs) else: self.metrics_func = None @staticmethod def _is_input_shape_compatible(input, placeholder): if hasattr(input, 'shape') and hasattr(placeholder, 'shape'): num_dynamic = get_num_dynamic_axis(placeholder) input_shape = input.shape[num_dynamic:] placeholder_shape = placeholder.shape for i, p in zip(input_shape, placeholder_shape): if i != p and p != C.InferredDimension and p != C.FreeDimension: return False return True def __call__(self, inputs): global _LEARNING_PHASE_PLACEHOLDER global _LEARNING_PHASE assert isinstance(inputs, (list, tuple)) feed_dict = {} for tensor, value in zip(self.placeholders, inputs): # cntk only support calculate on float, do auto cast here if (hasattr(value, 'dtype') and value.dtype != np.float32 and value.dtype != np.float64): value = value.astype(np.float32) if tensor == _LEARNING_PHASE_PLACEHOLDER: _LEARNING_PHASE_PLACEHOLDER.value = np.asarray(value) else: # in current version cntk can't support input with variable # length. Will support it in next release. if not self._is_input_shape_compatible(value, tensor): raise ValueError('CNTK backend: The placeholder has been resolved ' 'to shape `%s`, but input shape is `%s`. Currently ' 'CNTK can not take variable length inputs. Please ' 'pass inputs that have a static shape.' % (str(tensor.shape), str(value.shape))) feed_dict[tensor] = value updated = [] if self.trainer is not None: input_dict = {} for argument in self.loss.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: argument %s is not found in inputs. ' 'Please double check the model and inputs in ' '`train_function`.' % argument.name) result = self.trainer.train_minibatch( input_dict, self.trainer_output) assert(len(result) == 2) outputs = result[1] for o in self.trainer_output: updated.append(outputs[o]) if self.metrics_func is not None: input_dict = {} for argument in self.metrics_func.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError('CNTK backend: metrics argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) # Some ops (like dropout) won't be applied during "eval" in cntk. # They only evaluated in training phase. To make it work, call # "forward" method to let cntk know we want to evaluate them.from # But the assign ops won't be executed under this mode, that's why # we need this check. 
if (self.unrelated_updates is None and (_LEARNING_PHASE_PLACEHOLDER.value == 1.0 or _LEARNING_PHASE == 1)): _, output_values = self.metrics_func.forward( input_dict, self.metrics_func.outputs, (self.metrics_func.outputs[0],), as_numpy=False) else: output_values = self.metrics_func.eval(input_dict, as_numpy=False) if isinstance(output_values, dict): for o in self.metrics_outputs: value = output_values[o] v = value.asarray() updated.append(v) else: v = output_values.asarray() for o in self.metrics_outputs: updated.append(v) if self.unrelated_updates is not None: input_dict = {} for argument in self.unrelated_updates.arguments: if argument in feed_dict: input_dict[argument] = feed_dict[argument] else: raise ValueError( 'CNTK backend: assign ops argument %s ' 'is not found in inputs. Please double ' 'check the model and inputs.' % argument.name) self.unrelated_updates.eval(input_dict, as_numpy=False) return updated def function(inputs, outputs, updates=[], **kwargs): return Function(inputs, outputs, updates=updates, **kwargs) def temporal_padding(x, padding=(1, 1)): assert len(padding) == 2 num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if num_dynamic_axis > 0: assert len(base_shape) == 2 if hasattr(C, 'pad'): x = C.pad(x, pattern=[padding, (0, 0)]) else: x = _padding(x, padding, 0) else: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[(0, 0), padding, (0, 0)]) else: x = _padding(x, padding, 1) return x def _padding(x, pattern, axis): base_shape = x.shape if b_any([dim < 0 for dim in base_shape]): raise ValueError('CNTK Backend: padding input tensor with ' 'shape `%s` contains non-specified dimension, ' 'which is not supported. Please give fixed ' 'dimension to enable padding.' % base_shape) if pattern[0] > 0: prefix_shape = list(base_shape) prefix_shape[axis] = pattern[0] prefix_shape = tuple(prefix_shape) x = C.splice(C.constant(value=0, shape=prefix_shape), x, axis=axis) base_shape = x.shape if pattern[1] > 0: postfix_shape = list(base_shape) postfix_shape[axis] = pattern[1] postfix_shape = tuple(postfix_shape) x = C.splice(x, C.constant(value=0, shape=postfix_shape), axis=axis) return x def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) else: if num_dynamic_axis > 0: assert len(base_shape) == 3 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) else: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) return x def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None): assert len(padding) == 3 assert 
len(padding[0]) == 2 assert len(padding[1]) == 2 assert len(padding[2]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) num_dynamic_axis = _get_dynamic_axis_num(x) base_shape = x.shape if data_format == 'channels_first': if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], [0, 0], list(padding[0]), list(padding[1]), list(padding[2])]) else: x = _padding(x, padding[0], 2) x = _padding(x, padding[1], 3) x = _padding(x, padding[2], 4) else: if num_dynamic_axis > 0: assert len(base_shape) == 4 if hasattr(C, 'pad'): x = C.pad(x, pattern=[list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 0) x = _padding(x, padding[1], 1) x = _padding(x, padding[2], 2) else: assert len(base_shape) == 5 if hasattr(C, 'pad'): x = C.pad(x, pattern=[[0, 0], list(padding[0]), list(padding[1]), list(padding[2]), [0, 0]]) else: x = _padding(x, padding[0], 1) x = _padding(x, padding[1], 2) x = _padding(x, padding[2], 3) return x def one_hot(indices, num_classes): return C.one_hot(indices, num_classes) def get_value(x): if isinstance( x, C.variables.Parameter) or isinstance( x, C.variables.Constant): return x.value else: return eval(x) def batch_get_value(xs): result = [] for x in xs: if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): result.append(x.value) else: result.append(eval(x)) return result def set_value(x, value): if (isinstance(x, C.variables.Parameter) or isinstance(x, C.variables.Constant)): if isinstance(value, (float, int)): value = np.full(x.shape, value, dtype=floatx()) x.value = value else: raise NotImplementedError def print_tensor(x, message=''): return C.user_function( LambdaFunc(x, when=lambda x: True, execute=lambda x: print(message))) def batch_set_value(tuples): for t in tuples: x = t[0] value = t[1] if isinstance(value, np.ndarray) is False: value = np.asarray(value) if isinstance(x, C.variables.Parameter): x.value = value else: raise NotImplementedError def stop_gradient(variables): if isinstance(variables, (list, tuple)): return map(C.stop_gradient, variables) else: return C.stop_gradient(variables) def switch(condition, then_expression, else_expression): ndim_cond = ndim(condition) ndim_expr = ndim(then_expression) if ndim_cond > ndim_expr: raise ValueError('Rank of condition should be less' ' than or equal to rank of then and' ' else expressions. 
ndim(condition)=' + str(ndim_cond) + ', ndim(then_expression)' '=' + str(ndim_expr)) elif ndim_cond < ndim_expr: shape_expr = int_shape(then_expression) ndim_diff = ndim_expr - ndim_cond for i in range(ndim_diff): condition = expand_dims(condition) condition = tile(condition, shape_expr[ndim_cond + i]) return C.element_select(condition, then_expression, else_expression) def elu(x, alpha=1.): res = C.elu(x) if alpha == 1: return res else: return C.element_select(C.greater(x, 0), res, alpha * res) def in_top_k(predictions, targets, k): _targets = C.one_hot(targets, predictions.shape[-1]) result = C.classification_error(predictions, _targets, topN=k) return 1 - C.reshape(result, shape=()) def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) padding = _preprocess_border_mode(padding) strides = (1,) + strides # cntk output_shape does not include batch axis output_shape = output_shape[1:] # in keras2, need handle output shape in different format if data_format == 'channels_last': shape = list(output_shape) shape[0] = output_shape[2] shape[1] = output_shape[0] shape[2] = output_shape[1] output_shape = tuple(shape) x = C.convolution_transpose( kernel, x, strides, auto_padding=[ False, padding, padding], output_shape=output_shape) return _postprocess_conv2d_output(x, data_format) def identity(x, name=None): if name is None: name = '%s_alias' % x.name return C.alias(x, name=name) def _preprocess_conv2d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. # TH input shape: (samples, input_depth, rows, cols) # TF input shape: (samples, rows, cols, input_depth) x = C.transpose(x, (2, 0, 1)) return x def _preprocess_conv2d_kernel(kernel, data_format): # As of Keras 2.0.0, all kernels are normalized # on the format `(rows, cols, input_depth, depth)`, # independently of `data_format`. # CNTK expects `(depth, input_depth, rows, cols)`. kernel = C.transpose(kernel, (3, 2, 0, 1)) return kernel def _preprocess_border_mode(padding): if padding == 'same': padding = True elif padding == 'valid': padding = False else: raise ValueError('Invalid border mode: ' + str(padding)) return padding def _postprocess_conv2d_output(x, data_format): if data_format == 'channels_last': x = C.transpose(x, (1, 2, 0)) return x def _preprocess_conv3d_input(x, data_format): if data_format == 'channels_last': # TF uses the last dimension as channel dimension, # instead of the 2nd one. 
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3) # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, # input_depth) x = C.transpose(x, (3, 0, 1, 2)) return x def _preprocess_conv3d_kernel(kernel, dim_ordering): kernel = C.transpose(kernel, (4, 3, 0, 1, 2)) return kernel def _postprocess_conv3d_output(x, dim_ordering): if dim_ordering == 'channels_last': x = C.transpose(x, (1, 2, 3, 0)) return x def _get_dynamic_axis_num(x): if hasattr(x, 'dynamic_axes'): return len(x.dynamic_axes) else: return 0 def _contain_seqence_axis(x): if _get_dynamic_axis_num(x) > 1: return x.dynamic_axes[1] == C.Axis.default_dynamic_axis() else: return False def get_num_dynamic_axis(x): return _get_dynamic_axis_num(x) def _reduce_on_axis(x, axis, reduce_fun_name): if isinstance(axis, list): for a in axis: if isinstance(a, C.Axis) \ and a != C.Axis.default_batch_axis() \ and hasattr(C.sequence, reduce_fun_name): x = getattr(C.sequence, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, a) else: x = getattr(C, reduce_fun_name)(x, axis) return x def _reshape_sequence(x, time_step): tmp_shape = list(int_shape(x)) tmp_shape[1] = time_step return reshape(x, tmp_shape) def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride = strides[0] kernel_shape = int_shape(kernel) output_length, feature_dim, filters = kernel_shape xs = [] for i in range(output_length): slice_length = slice(i * stride, i * stride + kernel_size[0]) xs.append(reshape(inputs[:, slice_length, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to output_filters first, to apply broadcast weight = permute_dimensions(kernel, (2, 0, 1)) # Shape: (batch, filters, output_length, input_length * kernel_size) output = x_aggregate * weight # Shape: (batch, filters, output_length) output = sum(output, axis=3) # Shape: (batch, output_length, filters) return permute_dimensions(output, (0, 2, 1)) def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format ' + str(data_format)) stride_row, stride_col = strides output_row, output_col = output_shape kernel_shape = int_shape(kernel) _, feature_dim, filters = kernel_shape xs = [] for i in range(output_row): for j in range(output_col): slice_row = slice(i * stride_row, i * stride_row + kernel_size[0]) slice_col = slice(j * stride_col, j * stride_col + kernel_size[1]) if data_format == 'channels_first': xs.append(reshape(inputs[:, :, slice_row, slice_col], (-1, 1, feature_dim))) else: xs.append(reshape(inputs[:, slice_row, slice_col, :], (-1, 1, feature_dim))) x_aggregate = concatenate(xs, axis=1) # transpose kernel to put filters first weight = permute_dimensions(kernel, (2, 0, 1)) # shape: batch, filters, output_length, input_length * kernel_size output = x_aggregate * weight # shape: batch, filters, output_length output = sum(output, axis=3) # shape: batch, filters, row, col output = reshape(output, (-1, filters, output_row, output_col)) if data_format == 'channels_last': # shape: batch, row, col, filters output = permute_dimensions(output, (0, 2, 3, 1)) return output def reverse(x, axes): if isinstance(axes, int): axes = [axes] cntk_axes = _normalize_axis(axes, x) 
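    # CNTK reverses an axis by slicing its full range (begin_index == end_index == 0)
    # with a stride of -1; the three lists below set that up for every requested axis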
begin_index = [0 for _ in cntk_axes] end_index = [0 for _ in cntk_axes] strides = [-1 for _ in cntk_axes] return C.slice(x, cntk_axes, begin_index, end_index, strides) def _reshape_batch(x, shape): # there is a bug in cntk 2.1's unpack_batch implementation if hasattr(C, 'unpack_batch') and _get_cntk_version() >= 2.2: const_a = C.unpack_batch(x) const_a = C.reshape(const_a, shape) return C.to_batch(const_a) else: return C.user_function(ReshapeBatch(x, shape[1:])) def _get_cntk_version(): version = C.__version__ if version.endswith('+'): version = version[:-1] # for hot fix, ignore all the . except the first one. if len(version) > 2 and version[1] == '.': version = version[:2] + version[2:].replace('.', '') try: return float(version) except: warnings.warn( 'CNTK backend warning: CNTK version not detected. ' 'Will using CNTK 2.0 GA as default.') return float(2.0) class ReshapeBatch(C.ops.functions.UserFunction): def __init__(self, input, shape, name='reshape_with_batch'): super(ReshapeBatch, self).__init__([input], as_numpy=False, name=name) self.from_shape = input.shape self.target_shape = shape def infer_outputs(self): batch_axis = C.Axis.default_batch_axis() return [ C.output_variable( self.target_shape, self.inputs[0].dtype, [batch_axis])] def forward(self, arguments, device=None, outputs_to_retain=None): num_element = arguments.shape()[0] * np.prod(np.asarray(self.from_shape)) num_static_element = np.prod(
np.asarray(self.target_shape))
        # assumed completion (the source is cut off here): the element counts
        # above determine the batch size, and the value is reshaped back onto
        # (batch,) + target_shape
        num_batch = int(num_element / num_static_element)
        result = arguments.data().as_shape((num_batch,) + self.target_shape)
        return None, C.cntk_py.Value(result)
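# A minimal standalone sketch of the version-string normalisation performed by
# _get_cntk_version above (no CNTK required; the helper name
# _normalize_cntk_version_string is illustrative, not part of the backend):
# a trailing '+' is stripped and every '.' after the first is dropped, so a
# hot-fix release string such as '2.3.1+' compares numerically as 2.31.
def _normalize_cntk_version_string(version):
    if version.endswith('+'):
        version = version[:-1]
    # keep only the first '.', drop the rest so float() can parse the string
    if len(version) > 2 and version[1] == '.':
        version = version[:2] + version[2:].replace('.', '')
    return float(version)

assert _normalize_cntk_version_string('2.3.1+') == 2.31
assert _normalize_cntk_version_string('2.0') == 2.0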
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) 
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool] utils = emd_utils.Utility(time=time, time_series=time_series_reflect) no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()] no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()] point_1 = 5.4 length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101) length_distance_time = point_1 * np.pi * np.ones_like(length_distance) length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101) length_top = maxima_y[-1] * np.ones_like(length_time) length_bottom = minima_y[-1] * np.ones_like(length_time) point_2 = 5.2 length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101) length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2) length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101) length_top_2 = time_series[-1] * np.ones_like(length_time_2) length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2) symmetry_axis_1_time = minima_x[-1] * np.ones(101) symmetry_axis_2_time = time[-1] * np.ones(101) symmetry_axis = np.linspace(-2, 2, 101) end_time = np.linspace(time[-1] - width, time[-1] + width, 101) end_signal = time_series[-1] * np.ones_like(end_time) anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101) anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Symmetry Edge Effects Example') plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10)) plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2, label=textwrap.fill('Anti-symmetric signal', 10)) plt.plot(max_dash_time, max_dash, 'k-') plt.plot(min_dash_time, min_dash, 'k-') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(length_distance_time, length_distance, 'k--') plt.plot(length_distance_time_2, length_distance_2, 'k--') plt.plot(length_time, length_top, 'k-') plt.plot(length_time, length_bottom, 'k-') plt.plot(length_time_2, length_top_2, 'k-') plt.plot(length_time_2, length_bottom_2, 'k-') plt.plot(end_time, end_signal, 'k-') plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1) plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1) plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1) plt.text(5.1 * np.pi, -0.7, r'$\beta$L') plt.text(5.34 * np.pi, -0.05, 'L') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10)) plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10)) plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10)) plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_symmetry_anti.png') plt.show() 
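# A minimal standalone sketch of first-order finite-difference extrema
# detection, the operation that emd_utils.Utility.max_bool_func_1st_order_fd()
# and min_bool_func_1st_order_fd() are assumed to perform in the plots above:
# a point is a local maximum where the sign of the discrete derivative flips
# from positive to negative, and a local minimum where it flips the other way.
demo_time = np.linspace(0, 2 * np.pi, 101)
demo_series = np.sin(demo_time)
demo_diff_sign = np.sign(np.diff(demo_series))
demo_max_bool = np.hstack((False, (demo_diff_sign[:-1] > 0) & (demo_diff_sign[1:] < 0), False))
demo_min_bool = np.hstack((False, (demo_diff_sign[:-1] < 0) & (demo_diff_sign[1:] > 0), False))
# sin(t) on [0, 2*pi] has its interior maximum at pi/2 and minimum at 3*pi/2
assert abs(demo_time[demo_max_bool][0] - np.pi / 2) < 1e-8
assert abs(demo_time[demo_min_bool][0] - 3 * np.pi / 2) < 1e-8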
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101) min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4) dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101) dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101) ax = plt.subplot(111) figure_size = plt.gcf().get_size_inches() factor = 0.9 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) plt.plot(time, time_series, LineWidth=2, label='Signal') plt.title('Slope-Based Edge Effects Example') plt.plot(max_dash_time_1, max_dash_1, 'k-') plt.plot(max_dash_time_2, max_dash_2, 'k-') plt.plot(max_dash_time_3, max_dash_3, 'k-') plt.plot(min_dash_time_1, min_dash_1, 'k-') plt.plot(min_dash_time_2, min_dash_2, 'k-') plt.plot(min_dash_time_3, min_dash_3, 'k-') plt.plot(min_dash_time_4, min_dash_4, 'k-') plt.plot(maxima_dash_time_1, maxima_dash, 'k-') plt.plot(maxima_dash_time_2, maxima_dash, 'k-') plt.plot(maxima_dash_time_3, maxima_dash, 'k-') plt.plot(minima_dash_time_1, minima_dash, 'k-') plt.plot(minima_dash_time_2, minima_dash, 'k-') plt.plot(minima_dash_time_3, minima_dash, 'k-') plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$') plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$') plt.text(4.30 * np.pi, 0.35, r'$s_1$') plt.text(4.43 * np.pi, -0.20, r'$s_2$') plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$') plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$') plt.plot(minima_line_dash_time, minima_line_dash, 'k--') plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--') plt.plot(dash_1_time, dash_1, 'k--') plt.plot(dash_2_time, dash_2, 'k--') plt.plot(dash_3_time, dash_3, 'k--') plt.plot(dash_4_time, dash_4, 'k--') plt.plot(dash_final_time, dash_final, 'k--') plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11)) plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11)) plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11)) plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11)) plt.xlim(3.9 * np.pi, 5.5 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_slope_based.png') plt.show() # plot 5 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = 
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal)
utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended)
maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()]
maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()]
maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1]
minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()]
minima_time = time[lsq_utils.min_bool_func_1st_order_fd()]
minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Single Neuron Neural Network Example')
plt.plot(time, lsq_signal, zorder=2, label='Signal')
plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12))
plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima')
plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima')
plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12))
plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12))
plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13))
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k')
plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k')
plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13))
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray')
plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray')
plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed')
plt.xlim(3.4 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/neural_network.png')
plt.show()

# plot 6a
np.random.seed(0)
time = np.linspace(0, 5 * np.pi, 1001)
knots_51 = np.linspace(0, 5 * np.pi, 51)
time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time)
noise = np.random.normal(0, 1, len(time_series))
time_series += noise
advemdpy = EMD(time=time, time_series=time_series)
imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3]
knots_31 = np.linspace(0, 5 * np.pi, 31)
imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3]
knots_11 = np.linspace(0, 5 * np.pi, 11)
imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3]
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}')
for knot in knots_51:
    axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}')
for knot in knots_31:
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
axs[2].plot(time, time_series, label='Time series')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_11[1, :]), 3)}')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--')
axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--')
axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--')
axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region')
plt.savefig('jss_figures/DFA_different_trends.png')
plt.show()

# plot 6b
fig, axs = plt.subplots(3, 1)
plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40))
plt.subplots_adjust(hspace=0.1)
axs[0].plot(time, time_series, label='Time series')
axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21))
for knot in knots_51:
    axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[0].set_xticklabels(['', '', '', '', '', ''])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[0].set_ylim(-5.5, 5.5)
axs[0].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[1].plot(time, time_series, label='Time series')
axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19))
axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19))
for knot in knots_31:
    axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi])
axs[1].set_xticklabels(['', '', '', '', '', ''])
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[1].set_ylim(-5.5, 5.5)
axs[1].set_xlim(0.95 * np.pi, 1.55 * np.pi)
axs[2].plot(time, time_series, label='Time series')
axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots')
axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots')
axs[2].plot(time, imfs_51[3, :], label='IMF 3 with 51 knots')
for knot in knots_11:
    axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots')
axs[2].set_xticks([np.pi, (3 / 2) * np.pi])
axs[2].set_xticklabels([r'$\pi$', r'$\frac{3}{2}\pi$'])
box_2 = axs[2].get_position()
axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height])
axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
axs[2].set_ylim(-5.5, 5.5)
axs[2].set_xlim(0.95 * np.pi, 1.55 * np.pi)
plt.savefig('jss_figures/DFA_different_trends_zoomed.png')
plt.show()

hs_outputs = hilbert_spectrum(time, imfs_51, hts_51, ifs_51, max_frequency=12, plot=False)

# plot 6c
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Simple Sinusoidal Time Series with Added Noise', 50))
x_hs, y, z = hs_outputs
z_min, z_max = 0, np.abs(z).max()
ax.pcolormesh(x_hs, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
ax.plot(x_hs[0, :], 8 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 8$', linewidth=3)
ax.plot(x_hs[0, :], 4 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 4$', linewidth=3)
ax.plot(x_hs[0, :], 2 * np.ones_like(x_hs[0, :]), '--', label=r'$\omega = 2$', linewidth=3)
ax.set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi])
ax.set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$'])
plt.ylabel(r'Frequency (rad.s$^{-1}$)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.85, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/DFA_hilbert_spectrum.png')
plt.show()

# plot 6c
time = np.linspace(0, 5 * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 51)
fluc = Fluctuation(time=time, time_series=time_series)
max_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=False)
max_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='maxima', smooth=True)
min_unsmoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=False)
min_smoothed = fluc.envelope_basis_function_approximation(knots_for_envelope=knots, extrema_type='minima', smooth=True)
util = Utility(time=time, time_series=time_series)
maxima = util.max_bool_func_1st_order_fd()
minima = util.min_bool_func_1st_order_fd()
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title(textwrap.fill('Plot Demonstrating Unsmoothed Extrema Envelopes if Schoenberg–Whitney Conditions are Not Satisfied', 50))
plt.plot(time, time_series, label='Time series', zorder=2, linewidth=2)
plt.scatter(time[maxima], time_series[maxima], c='r', label='Maxima', zorder=10)
plt.scatter(time[minima], time_series[minima], c='b', label='Minima', zorder=10)
plt.plot(time, max_unsmoothed[0], label=textwrap.fill('Unsmoothed maxima envelope', 10), c='darkorange')
plt.plot(time, max_smoothed[0], label=textwrap.fill('Smoothed maxima envelope', 10), c='red')
plt.plot(time, min_unsmoothed[0], label=textwrap.fill('Unsmoothed minima envelope', 10), c='cyan')
plt.plot(time, min_smoothed[0], label=textwrap.fill('Smoothed minima envelope', 10), c='blue')
for knot in knots[:-1]:
    plt.plot(knot * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', zorder=1)
plt.plot(knots[-1] * np.ones(101), np.linspace(-3.0, -2.0, 101), '--', c='grey', label='Knots', zorder=1)
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Schoenberg_Whitney_Conditions.png')
plt.show()

# plot 7
a = 0.25
width = 0.2
time = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 1001)
knots = np.linspace((0 + a) * np.pi, (5 - a) * np.pi, 11)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
inflection_bool = utils.inflection_point()
inflection_x = time[inflection_bool]
inflection_y = time_series[inflection_bool]
fluctuation = emd_mean.Fluctuation(time=time, time_series=time_series)
maxima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0]
maxima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'maxima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0]
minima_envelope = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=False, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0]
minima_envelope_smooth = fluctuation.envelope_basis_function_approximation(knots, 'minima', smooth=True, smoothing_penalty=0.2, edge_effect='none', spline_method='b_spline')[0]
inflection_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='inflection_points')[0]
binomial_points_envelope = fluctuation.direct_detrended_fluctuation_estimation(knots, smooth=True, smoothing_penalty=0.2, technique='binomial_average', order=21, increment=20)[0]
derivative_of_lsq = utils.derivative_forward_diff()
derivative_time = time[:-1]
derivative_knots = np.linspace(knots[0], knots[-1], 31)
# change (1) detrended_fluctuation_technique and (2) max_internal_iter and (3) debug (confusing with external debugging)
emd = AdvEMDpy.EMD(time=derivative_time, time_series=derivative_of_lsq)
imf_1_of_derivative = emd.empirical_mode_decomposition(knots=derivative_knots, knot_time=derivative_time, text=False, verbose=False)[0][1, :]
utils = emd_utils.Utility(time=time[:-1], time_series=imf_1_of_derivative)
optimal_maxima = np.r_[False, utils.derivative_forward_diff() < 0, False] & \
                 np.r_[utils.zero_crossing() == 1, False]
optimal_minima = np.r_[False, utils.derivative_forward_diff() > 0, False] & \
                 np.r_[utils.zero_crossing() == 1, False]
EEMD_maxima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'maxima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0]
EEMD_minima_envelope = fluctuation.envelope_basis_function_approximation_fixed_points(knots, 'minima', optimal_maxima, optimal_minima, smooth=False, smoothing_penalty=0.2, edge_effect='none')[0]
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Detrended Fluctuation Analysis Examples')
plt.plot(time, time_series, linewidth=2, label='Time series')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(time[optimal_maxima], time_series[optimal_maxima], c='darkred', zorder=4, label=textwrap.fill('Optimal maxima', 10))
plt.scatter(time[optimal_minima], time_series[optimal_minima], c='darkblue', zorder=4, label=textwrap.fill('Optimal minima', 10))
plt.scatter(inflection_x, inflection_y, c='magenta', zorder=4, label=textwrap.fill('Inflection points', 10))
plt.plot(time, maxima_envelope, c='darkblue', label=textwrap.fill('EMD envelope', 10))
plt.plot(time, minima_envelope, c='darkblue')
plt.plot(time, (maxima_envelope + minima_envelope) / 2, c='darkblue')
plt.plot(time, maxima_envelope_smooth, c='darkred', label=textwrap.fill('SEMD envelope', 10))
plt.plot(time, minima_envelope_smooth, c='darkred')
plt.plot(time, (maxima_envelope_smooth + minima_envelope_smooth) / 2, c='darkred')
plt.plot(time, EEMD_maxima_envelope, c='darkgreen', label=textwrap.fill('EEMD envelope', 10))
plt.plot(time, EEMD_minima_envelope, c='darkgreen')
plt.plot(time, (EEMD_maxima_envelope + EEMD_minima_envelope) / 2, c='darkgreen')
plt.plot(time, inflection_points_envelope, c='darkorange', label=textwrap.fill('Inflection point envelope', 10))
plt.plot(time, binomial_points_envelope, c='deeppink', label=textwrap.fill('Binomial average envelope', 10))
plt.plot(time, np.cos(time), c='black', label='True mean')
plt.xticks((0, 1 * np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi), (r'$0$', r'$\pi$', r'2$\pi$', r'3$\pi$', r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
plt.xlim(-0.25 * np.pi, 5.25 * np.pi)
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/detrended_fluctuation_analysis.png')
plt.show()

# Duffing Equation Example
def duffing_equation(xy, ts):
    gamma = 0.1
    epsilon = 1
    omega = ((2 * np.pi) / 25)
    return [xy[1], xy[0] - epsilon * xy[0] ** 3 + gamma * np.cos(omega * ts)]

t = np.linspace(0, 150, 1501)
XY0 = [1, 1]
solution = odeint(duffing_equation, XY0, t)
x = solution[:, 0]
dxdt = solution[:, 1]
x_points = [0, 50, 100, 150]
x_names = [0, 50, 100, 150]
y_points_1 = [-2, 0, 2]
y_points_2 = [-1, 0, 1]
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.2)
axs[0].plot(t, x)
axs[0].set_title('Duffing Equation Displacement')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, dxdt)
axs[1].set_title('Duffing Equation Velocity')
axs[1].set_ylim([-1.5, 1.5])
axs[1].set_xlim([0, 150])
axis = 0
for ax in axs.flat:
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel('x(t)')
        ax.set_yticks(y_points_1)
    if axis == 1:
        ax.set_ylabel(r'$ \dfrac{dx(t)}{dt} $')
        ax.set(xlabel='t')
        ax.set_yticks(y_points_2)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
    axis += 1
plt.savefig('jss_figures/Duffing_equation.png')
plt.show()

# compare other packages Duffing - top
pyemd = pyemd0215()
py_emd = pyemd(x)
IP, IF, IA = emd040.spectra.frequency_transform(py_emd.T, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using PyEMD 0.2.10', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_pyemd.png')
plt.show()

emd_sift = emd040.sift.sift(x)
IP, IF, IA = emd040.spectra.frequency_transform(emd_sift, 10, 'hilbert')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 0.2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using emd 0.3.3', 40))
plt.pcolormesh(t, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht_emd.png')
plt.show()

# compare other packages Duffing - bottom
emd_duffing = AdvEMDpy.EMD(time=t, time_series=x)
emd_duff, emd_ht_duff, emd_if_duff, _, _, _, _ = emd_duffing.empirical_mode_decomposition(verbose=False)
fig, axs = plt.subplots(2, 1)
plt.subplots_adjust(hspace=0.3)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
axs[0].plot(t, emd_duff[1, :], label='AdvEMDpy')
axs[0].plot(t, py_emd[0, :], '--', label='PyEMD 0.2.10')
axs[0].plot(t, emd_sift[:, 0], '--', label='emd 0.3.3')
axs[0].set_title('IMF 1')
axs[0].set_ylim([-2, 2])
axs[0].set_xlim([0, 150])
axs[1].plot(t, emd_duff[2, :], label='AdvEMDpy')
print(f'AdvEMDpy driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_duff[2, :])), 3)}')
axs[1].plot(t, py_emd[1, :], '--', label='PyEMD 0.2.10')
print(f'PyEMD driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - py_emd[1, :])), 3)}')
axs[1].plot(t, emd_sift[:, 1], '--', label='emd 0.3.3')
print(f'emd driving function error: {np.round(sum(abs(0.1 * np.cos(0.04 * 2 * np.pi * t) - emd_sift[:, 1])), 3)}')
axs[1].plot(t, 0.1 * np.cos(0.04 * 2 * np.pi * t), '--', label=r'$0.1$cos$(0.08{\pi}t)$')
axs[1].set_title('IMF 2')
axs[1].set_ylim([-0.2, 0.4])
axs[1].set_xlim([0, 150])
axis = 0
for ax in axs.flat:
    ax.label_outer()
    if axis == 0:
        ax.set_ylabel(r'$\gamma_1(t)$')
        ax.set_yticks([-2, 0, 2])
    if axis == 1:
        ax.set_ylabel(r'$\gamma_2(t)$')
        ax.set_yticks([-0.2, 0, 0.2])
    box_0 = ax.get_position()
    ax.set_position([box_0.x0, box_0.y0, box_0.width * 0.85, box_0.height])
    ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8)
    ax.set_xticks(x_points)
    ax.set_xticklabels(x_names)
    axis += 1
plt.savefig('jss_figures/Duffing_equation_imfs.png')
plt.show()

hs_outputs = hilbert_spectrum(t, emd_duff, emd_ht_duff, emd_if_duff, max_frequency=1.3, plot=False)
ax = plt.subplot(111)
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of Duffing Equation using AdvEMDpy', 40))
x, y, z = hs_outputs
y = y / (2 * np.pi)
z_min, z_max = 0, np.abs(z).max()
figure_size = plt.gcf().get_size_inches()
factor = 1.0
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
ax.pcolormesh(x, y, np.abs(z), cmap='gist_rainbow', vmin=z_min, vmax=z_max)
plt.plot(t[:-1], 0.124 * np.ones_like(t[:-1]), '--', label=textwrap.fill('Hamiltonian frequency approximation', 15))
plt.plot(t[:-1], 0.04 * np.ones_like(t[:-1]), 'g--', label=textwrap.fill('Driving function frequency', 15))
plt.xticks([0, 50, 100, 150])
plt.yticks([0, 0.1, 0.2])
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
box_0 = ax.get_position()
ax.set_position([box_0.x0, box_0.y0 + 0.05, box_0.width * 0.75, box_0.height * 0.9])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/Duffing_equation_ht.png')
plt.show()

# Carbon Dioxide Concentration Example
# note: with header=51 the parsed column names appear offset by one, so here
# 'month' holds the decimal year and 'decimal date' holds the monthly CO2 concentration
CO2_data = pd.read_csv('Data/co2_mm_mlo.csv', header=51)
plt.plot(CO2_data['month'], CO2_data['decimal date'])
plt.title(textwrap.fill('Mean Monthly Concentration of Carbon Dioxide in the Atmosphere', 35))
plt.ylabel('Parts per million')
plt.xlabel('Time (years)')
plt.savefig('jss_figures/CO2_concentration.png')
plt.show()

signal = CO2_data['decimal date']
signal = np.asarray(signal)
time = CO2_data['month']
time = np.asarray(time)

# compare other packages Carbon Dioxide - top
pyemd = pyemd0215()
py_emd = pyemd(signal)
IP, IF, IA = emd040.spectra.frequency_transform(py_emd[:2, :].T, 12, 'hilbert')
print(f'PyEMD annual frequency error: {np.round(sum(np.abs(IF[:, 0] - np.ones_like(IF[:, 0]))), 3)}')
freq_edges, freq_bins = emd040.spectra.define_hist_bins(0, 2, 100)
hht = emd040.spectra.hilberthuang(IF, IA, freq_edges)
hht = gaussian_filter(hht, sigma=1)
fig, ax = plt.subplots()
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.title(textwrap.fill('Gaussian Filtered Hilbert Spectrum of CO$_{2}$ Concentration using PyEMD 0.2.10', 45))
plt.ylabel('Frequency (year$^{-1}$)')
plt.xlabel('Time (years)')
plt.pcolormesh(time, freq_bins, hht, cmap='gist_rainbow', vmin=0, vmax=np.max(np.max(np.abs(hht))))
plt.plot(time,
np.ones_like(time)
numpy.ones_like
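The completion above fills in the `np.ones_like` call that draws a constant-frequency reference line over a Hilbert spectrum. A minimal, self-contained sketch of that pattern (the time span and frequency values here are illustrative assumptions, not taken from the script):

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 150, 1501)
# ones_like returns an array of ones with t's shape and dtype,
# so freq * np.ones_like(t) is a horizontal line at height freq.
for freq, style in [(0.124, 'k--'), (0.04, 'g--')]:
    plt.plot(t, freq * np.ones_like(t), style)
plt.show()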
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019

@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in:
## https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness; I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix friendly format.

import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os

projectDir = os.environ.get('LDPC')
if projectDir is None:
    import pathlib
    projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)

FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)

def nibbleToHex(inputArray):
    n = NIBBLE_CONVERTER.dot(inputArray)
    if n == 10:
        h = 'A'
    elif n == 11:
        h = 'B'
    elif n == 12:
        h = 'C'
    elif n == 13:
        h = 'D'
    elif n == 14:
        h = 'E'
    elif n == 15:
        h = 'F'
    else:
        h = str(n)
    return h

def binaryArraytoHex(inputArray):
    d1 = len(inputArray)
    assert (d1 % 4 == 0)
    outputArray = np.zeros(d1 // 4, dtype = str)
    outputString = ''
    for j in range(d1 // 4):
        nibble = inputArray[4 * j : 4 * j + 4]
        h = nibbleToHex(nibble)
        outputArray[j] = h
        outputString = outputString + h
    return outputArray, outputString

def hexStringToBinaryArray(hexString):
    outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
    for i in hexString:
        if i == '0':
            nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '1':
            nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '2':
            nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '3':
            nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '4':
            nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '5':
            nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '6':
            nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '7':
            nibble =
np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
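The parser above maps between hexadecimal strings and binary arrays one nibble at a time, most significant bit first. A minimal round-trip sketch of that convention (standalone; it reimplements the dot-product trick rather than importing the module):

import numpy as np

NIBBLE = np.array([8, 4, 2, 1])   # same MSB-first weights as NIBBLE_CONVERTER
bits = np.array([0, 1, 1, 1])     # one nibble
value = NIBBLE.dot(bits)          # 0*8 + 1*4 + 1*2 + 1*1 = 7
assert format(value, 'X') == '7'  # matches nibbleToHex for values 0-15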
import argparse
import json
import numpy as np
import pandas as pd
import os
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, f1_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras import backend as K
from keras.utils.vis_utils import plot_model
from sklearn.externals import joblib
import time

def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.
        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.
        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

def get_embeddings(sentences_list, layer_json):
    '''
    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :return: Dictionary with key each sentence of the sentences_list and as value the embedding
    '''
    sentences = dict()  # dict with key the index of each line of the sentences_list.txt and as value the sentence
    embeddings = dict()  # dict with key the index of each sentence and as value its embedding
    sentence_emb = dict()  # key: sentence, value: its embedding
    with open(sentences_list, 'r') as file:
        for index, line in enumerate(file):
            sentences[index] = line.strip()
    with open(layer_json, 'r', encoding='utf-8') as f:
        for line in f:
            embeddings[json.loads(line)['linex_index']] = np.asarray(json.loads(line)['features'])
    for key, value in sentences.items():
        sentence_emb[value] = embeddings[key]
    return sentence_emb

def train_classifier(sentences_list, layer_json, dataset_csv, filename):
    '''
    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :param filename: the path of the pickle file in which the model will be stored
    :return:
    '''
    dataset = pd.read_csv(dataset_csv)
    bert_dict = get_embeddings(sentences_list, layer_json)
    length = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    for row in dataset.iterrows():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.append(bert_dict[sentence])
        else:
            sentence_emb.append(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.append(bert_dict[previous])
        else:
            previous_emb.append(np.zeros(768))
        if nexts in bert_dict:
            next_list.append(bert_dict[nexts])
        else:
            next_list.append(np.zeros(768))
        if section in bert_dict:
            section_list.append(bert_dict[section])
        else:
            section_list.append(np.zeros(768))
        length.append(row[1][4])
        label.append(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(section_emb.shape)
    length = np.asarray(length)
    print(length.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
    features = np.column_stack([features, length])  # np.append(features, length, axis=1)
    print(features.shape)
    X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
    log = LogisticRegression(random_state=0, solver='newton-cg', max_iter=1000, C=0.1)
    log.fit(X_train, y_train)
    # save the model
    _ = joblib.dump(log, filename, compress=9)
    predictions = log.predict(X_val)
    print("###########################################")
    print("Results using embeddings from the", layer_json, "file")
    print(classification_report(y_val, predictions))
    print("F1 score using Logistic Regression:", f1_score(y_val, predictions))
    print("###########################################")
    # train a DNN
    f1_results = list()
    for i in range(3):
        model = Sequential()
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dense(128, activation='relu', trainable=True))
        model.add(Dropout(0.30))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.25))
        model.add(Dense(64, activation='relu', trainable=True))
        model.add(Dropout(0.35))
        model.add(Dense(1, activation='sigmoid'))
        # compile network
        model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[f1])
        # fit network
        model.fit(X_train, y_train, epochs=100, batch_size=64)
        loss, f_1 = model.evaluate(X_val, y_val, verbose=1)
        print('\nTest F1: %f' % (f_1 * 100))
        f1_results.append(f_1)
        model = None
    print("###########################################")
    print("Results using embeddings from the", layer_json, "file")
    # evaluate
    print(np.mean(f1_results))
    print("###########################################")

def parameter_tuning_LR(sentences_list, layer_json, dataset_csv):
    '''
    :param sentences_list: the path of the sentences.txt
    :param layer_json: the path of the json file that contains the embeddings of the sentences
    :param dataset_csv: the path of the dataset
    :return:
    '''
    dataset = pd.read_csv(dataset_csv)
    bert_dict = get_embeddings(sentences_list, layer_json)
    length = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    for row in dataset.iterrows():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.append(bert_dict[sentence])
        else:
            sentence_emb.append(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.append(bert_dict[previous])
        else:
            previous_emb.append(np.zeros(768))
        if nexts in bert_dict:
            next_list.append(bert_dict[nexts])
        else:
            next_list.append(np.zeros(768))
        if section in bert_dict:
            section_list.append(bert_dict[section])
        else:
            section_list.append(np.zeros(768))
        length.append(row[1][4])
        label.append(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(section_emb.shape)
    length = np.asarray(length)
    print(length.shape)
    label = np.asarray(label)
    print(errors)
    features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
    features = np.column_stack([features, length])
    print(features.shape)
    X_train, X_val, y_train, y_val = train_test_split(features, label, test_size=0.33, random_state=42)
    C = [0.1, 1, 2, 5, 10]
    solver = ['newton-cg', 'saga', 'sag']
    best_params = dict()
    best_score = 0.0
    for c in C:
        for s in solver:
            start = time.time()
            log = LogisticRegression(random_state=0, solver=s, max_iter=1000, C=c)
            log.fit(X_train, y_train)
            predictions = log.predict(X_val)
            print("###########################################")
            print("LR with C =", c, 'and solver = ', s)
            print("Results using embeddings from the", layer_json, "file")
            print(classification_report(y_val, predictions))
            f1 = f1_score(y_val, predictions)
            if f1 > best_score:
                best_score = f1
                best_params['c'] = c
                best_params['solver'] = s
            print("F1 score using Logistic Regression:", f1)
            print("###########################################")
            end = time.time()
            running_time = end - start
            print("Running time: " + str(running_time))

def visualize_DNN(file_to_save):
    '''
    Save the DNN architecture to a png file. Better use the Visulize_DNN.ipynb notebook.
    :param file_to_save: the png file to which the architecture of the DNN will be saved
    :return: None
    '''
    model = Sequential()
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dense(128, activation='relu', trainable=True))
    model.add(Dropout(0.30))
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation='relu', trainable=True))
    model.add(Dropout(0.35))
    model.add(Dense(1, activation='sigmoid'))
    plot_model(model, to_file=file_to_save, show_shapes=True)

def save_model(sentences_list, layer_json, dataset_csv, pkl):
    dataset = pd.read_csv(dataset_csv)
    bert_dict = get_embeddings(sentences_list, layer_json)
    length = list()
    sentence_emb = list()
    previous_emb = list()
    next_list = list()
    section_list = list()
    label = list()
    errors = 0
    for row in dataset.iterrows():
        sentence = row[1][0].strip()
        previous = row[1][1].strip()
        nexts = row[1][2].strip()
        section = row[1][3].strip()
        if sentence in bert_dict:
            sentence_emb.append(bert_dict[sentence])
        else:
            sentence_emb.append(np.zeros(768))
            print(sentence)
            errors += 1
        if previous in bert_dict:
            previous_emb.append(bert_dict[previous])
        else:
            previous_emb.append(np.zeros(768))
        if nexts in bert_dict:
            next_list.append(bert_dict[nexts])
        else:
            next_list.append(np.zeros(768))
        if section in bert_dict:
            section_list.append(bert_dict[section])
        else:
            section_list.append(np.zeros(768))
        length.append(row[1][4])
        label.append(row[1][5])
    sentence_emb = np.asarray(sentence_emb)
    print(sentence_emb.shape)
    next_emb = np.asarray(next_list)
    print(next_emb.shape)
    previous_emb = np.asarray(previous_emb)
    print(previous_emb.shape)
    section_emb = np.asarray(section_list)
    print(section_emb.shape)
    length = np.asarray(length)
    print(length.shape)
    label = np.asarray(label)
    print(errors)
    features =
np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)
numpy.concatenate
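Each of the functions above assembles features the same way: four 768-dimensional BERT embeddings are concatenated per row, then the scalar sentence length is appended as a final column. A minimal shape-check sketch of that assembly (the row count is an illustrative assumption):

import numpy as np

n = 5
sentence_emb, previous_emb, next_emb, section_emb = (np.zeros((n, 768)) for _ in range(4))
length = np.arange(n)
features = np.concatenate([sentence_emb, previous_emb, next_emb, section_emb], axis=1)  # (n, 3072)
features = np.column_stack([features, length])  # append length as the last column
assert features.shape == (n, 4 * 768 + 1)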
import numpy as np

from stumpff import C, S
from CelestialBody import BODIES
from numerical import newton, laguerre
from lagrange import calc_f, calc_fd, calc_g, calc_gd

def kepler_chi(chi, alpha, r0, vr0, mu, dt):
    '''
    Kepler's Equation of the universal anomaly, modified for use in
    numerical solvers.
    '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi**2*C(z) + \
           (1 - alpha*r0)*chi**3*S(z) + \
           r0*chi - np.sqrt(mu)*dt

def dkepler_dchi(chi, alpha, r0, vr0, mu, dt):
    '''
    Derivative of Kepler's Equation of the universal anomaly, modified
    for use in numerical solvers.
    '''
    z = alpha*chi**2
    return (r0*vr0/np.sqrt(mu))*chi*(1 - alpha*chi**2*S(z)) + \
           (1 - alpha*r0)*chi**2*C(z) + r0

def d2kepler_dchi2(chi, alpha, r0, vr0, mu, dt):
    '''
    Second derivative of Kepler's Equation of the universal anomaly,
    modified for use in numerical solvers.
    '''
    z = alpha*chi**2
    S_ = S(z)
    return (r0*vr0/np.sqrt(mu))*(1 - 3*z*S_ + z*(C(z) - 3*S_)) + \
           chi*(1 - z*S_)*(1 - alpha*r0)

def solve_kepler_chi(r_0, v_0, dt, body=BODIES['Earth'], method='laguerre', tol=1e-7, max_iters=100):
    '''
    Solve Kepler's Equation of the universal anomaly chi using the specified
    numerical method. Applies Algorithm 3.4 from Orbital Mechanics for
    Engineering Students, 4 ed, Curtis.

    :param r_0: `iterable` (km) initial position 3-vector
    :param v_0: `iterable` (km/s) initial velocity 3-vector
    :param dt: `float` (s) time after initial state to solve for r, v as 3-vectors
    :param body: `CelestialBody` (--) the celestial body to use for orbital parameters
    :param method: `str` (--) which numerical method to use to solve Kepler's Equation
    :param tol: `float` (--) decimal tolerance for numerical method (default 1e-7 is IEEE 754 single precision)
    :param max_iters: `int` (--) maximum number of iterations in numerical method before breaking
    :return: (km) final position 3-vector, (km/s) final velocity 3-vector
    '''
    VALID_METHODS = ('laguerre', 'newton')

    mu = body.mu  # (km**3/s**2) gravitational parameter of the specified primary body
    r0 = np.linalg.norm(r_0)  # (km) initial position magnitude
    v0 = np.linalg.norm(v_0)  # (km/s) initial velocity magnitude
    vr0 = np.dot(v_0, r_0)/r0  # (km/s) initial radial velocity magnitude
    alpha = 2/r0 - v0**2/mu  # (1/km) inverse of semi-major axis
    chi0 =
np.sqrt(mu)
numpy.sqrt
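The completion above begins the standard initial estimate for the universal anomaly from Curtis, chi0 = sqrt(mu) * |alpha| * dt, which seeds the Newton or Laguerre iteration. A minimal sketch with illustrative orbital values (the numbers are assumptions, not from the source):

import numpy as np

mu = 398600.0                    # (km**3/s**2) Earth's gravitational parameter
r0, v0 = 10000.0, 7.0            # (km, km/s) sample initial state magnitudes
dt = 3600.0                      # (s) propagation interval
alpha = 2 / r0 - v0 ** 2 / mu    # (1/km) inverse of semi-major axis, as above
chi0 = np.sqrt(mu) * np.abs(alpha) * dt  # starting guess for the universal anomaly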
# coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division, print_function)

from ...extern import six
from ...extern.six.moves import zip
import pickle
import itertools

import pytest
import numpy as np
from numpy.testing.utils import assert_allclose

from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c

lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)

class TestLogUnitCreation(object):

    def test_logarithmic_units(self):
        """Check logarithmic units are set up correctly."""
        assert u.dB.to(u.dex) == 0.1
        assert u.dex.to(u.mag) == -2.5
        assert u.mag.to(u.dB) == -4

    @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses))
    def test_callable_units(self, lu_unit, lu_cls):
        assert isinstance(lu_unit, u.UnitBase)
        assert callable(lu_unit)
        assert lu_unit._function_unit_class is lu_cls

    @pytest.mark.parametrize('lu_unit', lu_units)
    def test_equality_to_normal_unit_for_dimensionless(self, lu_unit):
        lu = lu_unit()
        assert lu == lu._default_function_unit  # eg, MagUnit() == u.mag
        assert lu._default_function_unit == lu  # and u.mag == MagUnit()

    @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample))
    def test_call_units(self, lu_unit, physical_unit):
        """Create a LogUnit subclass using the callable unit and physical unit,
        and do basic check that output is right."""
        lu1 = lu_unit(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit

    def test_call_invalid_unit(self):
        with pytest.raises(TypeError):
            u.mag([])
        with pytest.raises(ValueError):
            u.mag(u.mag())

    @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product(
        lu_subclasses + [u.LogUnit], pu_sample))
    def test_subclass_creation(self, lu_cls, physical_unit):
        """Create a LogUnit subclass object for given physical unit,
        and do basic check that output is right."""
        lu1 = lu_cls(physical_unit)
        assert lu1.physical_unit == physical_unit
        assert lu1.function_unit == lu1._default_function_unit

        lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit)
        assert lu2.physical_unit == physical_unit
        assert lu2.function_unit == u.Unit(2*lu2._default_function_unit)

        with pytest.raises(ValueError):
            lu_cls(physical_unit, u.m)

def test_predefined_magnitudes():
    assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA)
    assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz)
    assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0)
    assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2))

def test_predefined_reinitialisation():
    assert u.mag('ST') == u.STmag
    assert u.mag('AB') == u.ABmag
    assert u.mag('Bol') == u.M_bol
    assert u.mag('bol') == u.m_bol

def test_predefined_string_roundtrip():
    """Ensure roundtripping; see #5015"""
    with u.magnitude_zero_points.enable():
        assert u.Unit(u.STmag.to_string()) == u.STmag
        assert u.Unit(u.ABmag.to_string()) == u.ABmag
        assert u.Unit(u.M_bol.to_string()) == u.M_bol
        assert u.Unit(u.m_bol.to_string()) == u.m_bol

def test_inequality():
    """Check __ne__ works (regression for #5342)."""
    lu1 = u.mag(u.Jy)
    lu2 = u.dex(u.Jy)
    lu3 = u.mag(u.Jy**2)
    lu4 = lu3 - lu1
    assert lu1 != lu2
    assert lu1 != lu3
    assert lu1 == lu4

class TestLogUnitStrings(object):

    def test_str(self):
        """Do some spot checks that str, repr, etc. work as expected."""
        lu1 = u.mag(u.Jy)
        assert str(lu1) == 'mag(Jy)'
        assert repr(lu1) == 'Unit("mag(Jy)")'
        assert lu1.to_string('generic') == 'mag(Jy)'
        with pytest.raises(ValueError):
            lu1.to_string('fits')

        lu2 = u.dex()
        assert str(lu2) == 'dex'
        assert repr(lu2) == 'Unit("dex(1)")'
        assert lu2.to_string() == 'dex(1)'

        lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag)
        assert str(lu3) == '2 mag(Jy)'
        assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")'
        assert lu3.to_string() == '2 mag(Jy)'

        lu4 = u.mag(u.ct)
        assert lu4.to_string('generic') == 'mag(ct)'
        assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( '
                                          '\\mathrm{ct} \\right)}$')
        assert lu4._repr_latex_() == lu4.to_string('latex')

class TestLogUnitConversion(object):
    @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample))
    def test_physical_unit_conversion(self, lu_unit, physical_unit):
        """Check various LogUnit subclasses are equivalent and convertible
        to their non-log counterparts."""
        lu1 = lu_unit(physical_unit)
        assert lu1.is_equivalent(physical_unit)
        assert lu1.to(physical_unit, 0.) == 1.

        assert physical_unit.is_equivalent(lu1)
        assert physical_unit.to(lu1, 1.) == 0.

        pu = u.Unit(8.*physical_unit)
        assert lu1.is_equivalent(physical_unit)
        assert lu1.to(pu, 0.) == 0.125

        assert pu.is_equivalent(lu1)
        assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15)

        # Check we round-trip.
        value = np.linspace(0., 10., 6)
        assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15)
        # And that we're not just returning True all the time.
        pu2 = u.g
        assert not lu1.is_equivalent(pu2)
        with pytest.raises(u.UnitsError):
            lu1.to(pu2)

        assert not pu2.is_equivalent(lu1)
        with pytest.raises(u.UnitsError):
            pu2.to(lu1)

    @pytest.mark.parametrize('lu_unit', lu_units)
    def test_container_unit_conversion(self, lu_unit):
        """Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
        is only possible when the physical unit is dimensionless."""
        values = np.linspace(0., 10., 6)
        lu1 = lu_unit(u.dimensionless_unscaled)
        assert lu1.is_equivalent(lu1.function_unit)
        assert_allclose(lu1.to(lu1.function_unit, values), values)

        lu2 = lu_unit(u.Jy)
        assert not lu2.is_equivalent(lu2.function_unit)
        with pytest.raises(u.UnitsError):
            lu2.to(lu2.function_unit, values)

    @pytest.mark.parametrize(
        'flu_unit, tlu_unit, physical_unit',
        itertools.product(lu_units, lu_units, pu_sample))
    def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
        """Check various LogUnit subclasses are equivalent and convertible
        to each other if they correspond to equivalent physical units."""
        values = np.linspace(0., 10., 6)
        flu = flu_unit(physical_unit)

        tlu = tlu_unit(physical_unit)
        assert flu.is_equivalent(tlu)
        assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
        assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit))

        tlu2 = tlu_unit(u.Unit(100.*physical_unit))
        assert flu.is_equivalent(tlu2)
        # Check that we round-trip.
        assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15)

        tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
        assert flu.is_equivalent(tlu3)
        assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15)

        tlu4 = tlu_unit(u.g)
        assert not flu.is_equivalent(tlu4)
        with pytest.raises(u.UnitsError):
            flu.to(tlu4, values)

    def test_unit_decomposition(self):
        lu = u.mag(u.Jy)
        assert lu.decompose() == u.mag(u.Jy.decompose())
        assert lu.decompose().physical_unit.bases == [u.kg, u.s]
        assert lu.si == u.mag(u.Jy.si)
        assert lu.si.physical_unit.bases == [u.kg, u.s]
        assert lu.cgs == u.mag(u.Jy.cgs)
        assert lu.cgs.physical_unit.bases == [u.g, u.s]

    def test_unit_multiple_possible_equivalencies(self):
        lu = u.mag(u.Jy)
        assert lu.is_equivalent(pu_sample)

class TestLogUnitArithmetic(object):
    def test_multiplication_division(self):
        """Check that multiplication/division with other units is only
        possible when the physical unit is dimensionless, and that this
        turns the unit into a normal one."""
        lu1 = u.mag(u.Jy)

        with pytest.raises(u.UnitsError):
            lu1 * u.m

        with pytest.raises(u.UnitsError):
            u.m * lu1

        with pytest.raises(u.UnitsError):
            lu1 / lu1

        for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
            with pytest.raises(u.UnitsError):
                lu1 / unit

        lu2 = u.mag(u.dimensionless_unscaled)

        with pytest.raises(u.UnitsError):
            lu2 * lu1

        with pytest.raises(u.UnitsError):
            lu2 / lu1

        # But dimensionless_unscaled can be cancelled.
        assert lu2 / lu2 == u.dimensionless_unscaled

        # With dimensionless, normal units are OK, but we return a plain unit.
        tf = lu2 * u.m
        tr = u.m * lu2
        for t in (tf, tr):
            assert not isinstance(t, type(lu2))
            assert t == lu2.function_unit * u.m
            with u.set_enabled_equivalencies(u.logarithmic()):
                with pytest.raises(u.UnitsError):
                    t.to(lu2.physical_unit)
        # Now we essentially have a LogUnit with a prefactor of 100,
        # so should be equivalent again.
        t = tf / u.cm
        with u.set_enabled_equivalencies(u.logarithmic()):
            assert t.is_equivalent(lu2.function_unit)
            assert_allclose(t.to(u.dimensionless_unscaled,
np.arange(3.)
numpy.arange
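The tests above exercise astropy's logarithmic units, which pair a function unit (mag, dex, dB) with a physical unit and convert between the two scales. A minimal usage sketch of the behaviour the conversion tests assert:

import astropy.units as u

lu = u.mag(u.Jy)              # magnitude unit referenced to 1 Jy
assert lu.physical_unit == u.Jy
assert lu.to(u.Jy, 0.) == 1.  # 0 mag(Jy) corresponds to 1 Jy
assert u.Jy.to(lu, 1.) == 0.  # and the reverse conversion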
# ________
#        /
# \     /
#  \   /
#   \ /
#    \/

import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD

# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040

sns.set(style='darkgrid')

pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)

# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
            c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
            pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
            c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()

knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)

# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()

# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()

# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()

# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) +
                              np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2, label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
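# --- Added illustrative sketch (not part of the original script; assumes numpy
# as np, as imported at the top of this script) ---
# The figure saved above contrasts two ways of extending a signal past its right
# endpoint before enveloping: a symmetric extension mirrors the signal about a
# vertical axis through the endpoint, while an anti-symmetric extension also
# flips the mirrored copy about the endpoint value (cf. time_series_anti_reflect
# = time_series_reflect[0] - time_series_reflect above). The hypothetical helper
# below is a minimal restatement of the standard point reflection
# f(t_end + s) = 2 * f(t_end) - f(t_end - s); note the script anchors its
# anti-symmetric variant slightly differently.
def reflect_right_edge(time_series, n, anti=False):
    """Return n samples extending time_series past its right edge by reflection."""
    mirrored = time_series[-2:-(n + 2):-1]  # samples mirrored about the final time stamp
    if anti:
        mirrored = 2 * time_series[-1] - mirrored  # flip about the endpoint value
    return mirrored

# e.g. reflect_right_edge(np.array([0., 1., 4., 9.]), 2) gives [4., 1.];
# with anti=True it gives [14., 17.], keeping the extension continuous at the edge.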
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]), -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]), 1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4, label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4, label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4, label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4, label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()

# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # NaN placeholder (the original used np.zeros_like(time_extended) / 0)
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal
neural_network_m = 200
neural_network_k = 100
# forward -> extrapolate to the right with a single linear neuron trained by gradient descent
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
    P[-1, col] = 1  # for additive constant
t = lsq_signal[-neural_network_m:]
# test - top
seed_weights = np.ones(neural_network_k) / neural_network_k
weights = 0 * seed_weights.copy()
train_input = P[:-1, :]
lr = 0.01
for iterations in range(1000):
    output = np.matmul(weights, train_input)
    error = (t - output)
    gradients = error * (- train_input)
    # guess average gradients
    average_gradients = np.mean(gradients, axis=1)
    # steepest descent
    max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients)))
    adjustment = - lr * average_gradients
    # adjustment = - lr * max_gradient_vector
    weights += adjustment
# test - bottom
weights_right = np.hstack((weights, 0))
max_count_right = 0
min_count_right = 0
i_right = 0
while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1):
    time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \
        sum(weights_right * np.hstack((time_series_extended[
            int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right):
            int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1)))
    i_right += 1
    if i_right > 1:
        emd_utils_max = \
            emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                 int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                               int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
        if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
            max_count_right += 1
        emd_utils_min = \
            emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                 int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)],
                              time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1):
                                                               int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)])
        if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
            min_count_right += 1
# backward <- extrapolate to the left; the same linear neuron, with weights fitted via cvxpy
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
    P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)]
    P[-1, col] = 1  # for additive constant
t = lsq_signal[:neural_network_m]
vx = cvx.Variable(int(neural_network_k + 1))
objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2))  # linear activation function is arbitrary
prob = cvx.Problem(objective)
result = prob.solve(verbose=True, solver=cvx.ECOS)
weights_left = np.array(vx.value)
max_count_left = 0
min_count_left = 0
i_left = 0
while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1):
    time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \
        2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left):
                                                               int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1
    i_left += 1
    if i_left > 1:
        emd_utils_max = \
            emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
                              time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
        if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0:
            max_count_left += 1
        emd_utils_min = \
            emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))],
                              time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))])
        if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0:
            min_count_left += 1
lsq_utils = emd_utils.Utility(time=time,
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) + np.cos(4 * time) + np.cos(8 * time) noise = np.random.normal(0, 1, len(time_series)) time_series += noise advemdpy = EMD(time=time, time_series=time_series) imfs_51, hts_51, ifs_51 = advemdpy.empirical_mode_decomposition(knots=knots_51, max_imfs=3, edge_effect='symmetric_anchor', verbose=False)[:3] knots_31 = np.linspace(0, 5 * np.pi, 31) imfs_31, hts_31, ifs_31 = advemdpy.empirical_mode_decomposition(knots=knots_31, max_imfs=2, edge_effect='symmetric_anchor', verbose=False)[:3] knots_11 = np.linspace(0, 5 * np.pi, 11) imfs_11, hts_11, ifs_11 = advemdpy.empirical_mode_decomposition(knots=knots_11, max_imfs=1, edge_effect='symmetric_anchor', verbose=False)[:3] fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) print(f'DFA fluctuation with 51 knots: {np.round(np.var(time_series - (imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :])), 3)}') for knot in knots_51: axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[0].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[0].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[0].set_xticklabels(['', '', '', '', '', '']) axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[0].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[0].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[0].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(time, time_series, label='Time series') axs[1].plot(time, imfs_31[1, :] + imfs_31[2, :], label=textwrap.fill('Sum of IMF 1 and IMF 2 with 31 knots', 19)) axs[1].plot(time, imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 2 and IMF 3 with 51 knots', 19)) print(f'DFA fluctuation with 31 knots: {np.round(np.var(time_series - (imfs_31[1, :] + imfs_31[2, :])), 3)}') for knot in knots_31: axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[1].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[1].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[1].set_xticklabels(['', '', '', '', '', '']) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) axs[1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[1].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[1].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[1].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') axs[2].plot(time, time_series, label='Time series') axs[2].plot(time, imfs_11[1, :], label='IMF 1 with 11 knots') axs[2].plot(time, imfs_31[2, :], label='IMF 2 with 31 knots') axs[2].plot(time, imfs_51[3, :], label='IMF 
3 with 51 knots') print(f'DFA fluctuation with 11 knots: {np.round(np.var(time_series - imfs_51[3, :]), 3)}') for knot in knots_11: axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1) axs[2].plot(knot * np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1, label='Knots') axs[2].set_xticks([0, np.pi, 2 * np.pi, 3 * np.pi, 4 * np.pi, 5 * np.pi]) axs[2].set_xticklabels(['$0$', r'$\pi$', r'$2\pi$', r'$3\pi$', r'$4\pi$', r'$5\pi$']) box_2 = axs[2].get_position() axs[2].set_position([box_2.x0 - 0.05, box_2.y0, box_2.width * 0.85, box_2.height]) axs[2].legend(loc='center left', bbox_to_anchor=(1, 0.5), fontsize=8) axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), 5.5 * np.ones(101), 'k--') axs[2].plot(np.linspace(0.95 * np.pi, 1.55 * np.pi, 101), -5.5 * np.ones(101), 'k--') axs[2].plot(0.95 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--') axs[2].plot(1.55 * np.pi * np.ones(101), np.linspace(-5.5, 5.5, 101), 'k--', label='Zoomed region') plt.savefig('jss_figures/DFA_different_trends.png') plt.show() # plot 6b fig, axs = plt.subplots(3, 1) plt.suptitle(textwrap.fill('Comparison of Trends Extracted with Different Knot Sequences Zoomed Region', 40)) plt.subplots_adjust(hspace=0.1) axs[0].plot(time, time_series, label='Time series') axs[0].plot(time, imfs_51[1, :] + imfs_51[2, :] + imfs_51[3, :], label=textwrap.fill('Sum of IMF 1, IMF 2, & IMF 3 with 51 knots', 21)) for knot in knots_51: axs[0].plot(knot *
np.ones(101), np.linspace(-5, 5, 101), '--', c='grey', zorder=1)
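# --- Added illustrative sketch (not part of the original script; assumes numpy
# as np, as imported at the top of this script) ---
# The print statements in the plot 6a block above score each candidate trend by
# the variance of the residual left after removing the trend, i.e. a detrended-
# fluctuation-style measure np.var(time_series - trend): the lower the value,
# the more of the series the extracted trend explains. A minimal restatement
# (the function name and inputs are hypothetical):
def residual_fluctuation(time_series, trend):
    """Variance of the detrended series, as printed for the 51-, 31- and 11-knot trends."""
    return float(np.var(time_series - trend))

# e.g. residual_fluctuation(np.array([1., 2., 3.]), np.array([1., 2., 2.]))
# evaluates to roughly 0.222.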
# coding: utf-8 # Licensed under a 3-clause BSD style license - see LICENSE.rst """ Test the Logarithmic Units and Quantities """ from __future__ import (absolute_import, unicode_literals, division, print_function) from ...extern import six from ...extern.six.moves import zip import pickle import itertools import pytest import numpy as np from numpy.testing.utils import assert_allclose from ...tests.helper import assert_quantity_allclose from ... import units as u, constants as c lu_units = [u.dex, u.mag, u.decibel] lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit] lq_subclasses = [u.Dex, u.Magnitude, u.Decibel] pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy) class TestLogUnitCreation(object): def test_logarithmic_units(self): """Check logarithmic units are set up correctly.""" assert u.dB.to(u.dex) == 0.1 assert u.dex.to(u.mag) == -2.5 assert u.mag.to(u.dB) == -4 @pytest.mark.parametrize('lu_unit, lu_cls', zip(lu_units, lu_subclasses)) def test_callable_units(self, lu_unit, lu_cls): assert isinstance(lu_unit, u.UnitBase) assert callable(lu_unit) assert lu_unit._function_unit_class is lu_cls @pytest.mark.parametrize('lu_unit', lu_units) def test_equality_to_normal_unit_for_dimensionless(self, lu_unit): lu = lu_unit() assert lu == lu._default_function_unit # eg, MagUnit() == u.mag assert lu._default_function_unit == lu # and u.mag == MagUnit() @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_call_units(self, lu_unit, physical_unit): """Create a LogUnit subclass using the callable unit and physical unit, and do basic check that output is right.""" lu1 = lu_unit(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit def test_call_invalid_unit(self): with pytest.raises(TypeError): u.mag([]) with pytest.raises(ValueError): u.mag(u.mag()) @pytest.mark.parametrize('lu_cls, physical_unit', itertools.product( lu_subclasses + [u.LogUnit], pu_sample)) def test_subclass_creation(self, lu_cls, physical_unit): """Create a LogUnit subclass object for given physical unit, and do basic check that output is right.""" lu1 = lu_cls(physical_unit) assert lu1.physical_unit == physical_unit assert lu1.function_unit == lu1._default_function_unit lu2 = lu_cls(physical_unit, function_unit=2*lu1._default_function_unit) assert lu2.physical_unit == physical_unit assert lu2.function_unit == u.Unit(2*lu2._default_function_unit) with pytest.raises(ValueError): lu_cls(physical_unit, u.m) def test_predefined_magnitudes(): assert_quantity_allclose((-21.1*u.STmag).physical, 1.*u.erg/u.cm**2/u.s/u.AA) assert_quantity_allclose((-48.6*u.ABmag).physical, 1.*u.erg/u.cm**2/u.s/u.Hz) assert_quantity_allclose((0*u.M_bol).physical, c.L_bol0) assert_quantity_allclose((0*u.m_bol).physical, c.L_bol0/(4.*np.pi*(10.*c.pc)**2)) def test_predefined_reinitialisation(): assert u.mag('ST') == u.STmag assert u.mag('AB') == u.ABmag assert u.mag('Bol') == u.M_bol assert u.mag('bol') == u.m_bol def test_predefined_string_roundtrip(): """Ensure roundtripping; see #5015""" with u.magnitude_zero_points.enable(): assert u.Unit(u.STmag.to_string()) == u.STmag assert u.Unit(u.ABmag.to_string()) == u.ABmag assert u.Unit(u.M_bol.to_string()) == u.M_bol assert u.Unit(u.m_bol.to_string()) == u.m_bol def test_inequality(): """Check __ne__ works (regression for #5342).""" lu1 = u.mag(u.Jy) lu2 = u.dex(u.Jy) lu3 = u.mag(u.Jy**2) lu4 = lu3 - lu1 assert lu1 != lu2 assert lu1 != lu3 assert lu1 == lu4 class TestLogUnitStrings(object): def
test_str(self): """Do some spot checks that str, repr, etc. work as expected.""" lu1 = u.mag(u.Jy) assert str(lu1) == 'mag(Jy)' assert repr(lu1) == 'Unit("mag(Jy)")' assert lu1.to_string('generic') == 'mag(Jy)' with pytest.raises(ValueError): lu1.to_string('fits') lu2 = u.dex() assert str(lu2) == 'dex' assert repr(lu2) == 'Unit("dex(1)")' assert lu2.to_string() == 'dex(1)' lu3 = u.MagUnit(u.Jy, function_unit=2*u.mag) assert str(lu3) == '2 mag(Jy)' assert repr(lu3) == 'MagUnit("Jy", unit="2 mag")' assert lu3.to_string() == '2 mag(Jy)' lu4 = u.mag(u.ct) assert lu4.to_string('generic') == 'mag(ct)' assert lu4.to_string('latex') == ('$\\mathrm{mag}$$\\mathrm{\\left( ' '\\mathrm{ct} \\right)}$') assert lu4._repr_latex_() == lu4.to_string('latex') class TestLogUnitConversion(object): @pytest.mark.parametrize('lu_unit, physical_unit', itertools.product(lu_units, pu_sample)) def test_physical_unit_conversion(self, lu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to their non-log counterparts.""" lu1 = lu_unit(physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(physical_unit, 0.) == 1. assert physical_unit.is_equivalent(lu1) assert physical_unit.to(lu1, 1.) == 0. pu = u.Unit(8.*physical_unit) assert lu1.is_equivalent(physical_unit) assert lu1.to(pu, 0.) == 0.125 assert pu.is_equivalent(lu1) assert_allclose(pu.to(lu1, 0.125), 0., atol=1.e-15) # Check we round-trip. value = np.linspace(0., 10., 6) assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.e-15) # And that we're not just returning True all the time. pu2 = u.g assert not lu1.is_equivalent(pu2) with pytest.raises(u.UnitsError): lu1.to(pu2) assert not pu2.is_equivalent(lu1) with pytest.raises(u.UnitsError): pu2.to(lu1) @pytest.mark.parametrize('lu_unit', lu_units) def test_container_unit_conversion(self, lu_unit): """Check that conversion to logarithmic units (u.mag, u.dB, u.dex) is only possible when the physical unit is dimensionless.""" values = np.linspace(0., 10., 6) lu1 = lu_unit(u.dimensionless_unscaled) assert lu1.is_equivalent(lu1.function_unit) assert_allclose(lu1.to(lu1.function_unit, values), values) lu2 = lu_unit(u.Jy) assert not lu2.is_equivalent(lu2.function_unit) with pytest.raises(u.UnitsError): lu2.to(lu2.function_unit, values) @pytest.mark.parametrize( 'flu_unit, tlu_unit, physical_unit', itertools.product(lu_units, lu_units, pu_sample)) def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit): """Check various LogUnit subclasses are equivalent and convertible to each other if they correspond to equivalent physical units.""" values = np.linspace(0., 10., 6) flu = flu_unit(physical_unit) tlu = tlu_unit(physical_unit) assert flu.is_equivalent(tlu) assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit)) assert_allclose(flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)) tlu2 = tlu_unit(u.Unit(100.*physical_unit)) assert flu.is_equivalent(tlu2) # Check that we round-trip. 
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.e-15) tlu3 = tlu_unit(physical_unit.to_system(u.si)[0]) assert flu.is_equivalent(tlu3) assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.e-15) tlu4 = tlu_unit(u.g) assert not flu.is_equivalent(tlu4) with pytest.raises(u.UnitsError): flu.to(tlu4, values) def test_unit_decomposition(self): lu = u.mag(u.Jy) assert lu.decompose() == u.mag(u.Jy.decompose()) assert lu.decompose().physical_unit.bases == [u.kg, u.s] assert lu.si == u.mag(u.Jy.si) assert lu.si.physical_unit.bases == [u.kg, u.s] assert lu.cgs == u.mag(u.Jy.cgs) assert lu.cgs.physical_unit.bases == [u.g, u.s] def test_unit_multiple_possible_equivalencies(self): lu = u.mag(u.Jy) assert lu.is_equivalent(pu_sample) class TestLogUnitArithmetic(object): def test_multiplication_division(self): """Check that multiplication/division with other units is only possible when the physical unit is dimensionless, and that this turns the unit into a normal one.""" lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 * u.m with pytest.raises(u.UnitsError): u.m * lu1 with pytest.raises(u.UnitsError): lu1 / lu1 for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex): with pytest.raises(u.UnitsError): lu1 / unit lu2 = u.mag(u.dimensionless_unscaled) with pytest.raises(u.UnitsError): lu2 * lu1 with pytest.raises(u.UnitsError): lu2 / lu1 # But dimensionless_unscaled can be cancelled. assert lu2 / lu2 == u.dimensionless_unscaled # With dimensionless, normal units are OK, but we return a plain unit. tf = lu2 * u.m tr = u.m * lu2 for t in (tf, tr): assert not isinstance(t, type(lu2)) assert t == lu2.function_unit * u.m with u.set_enabled_equivalencies(u.logarithmic()): with pytest.raises(u.UnitsError): t.to(lu2.physical_unit) # Now we essentially have a LogUnit with a prefactor of 100, # so should be equivalent again. t = tf / u.cm with u.set_enabled_equivalencies(u.logarithmic()): assert t.is_equivalent(lu2.function_unit) assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.), lu2.to(lu2.physical_unit, np.arange(3.))) # If we effectively remove lu1, a normal unit should be returned. t2 = tf / lu2 assert not isinstance(t2, type(lu2)) assert t2 == u.m t3 = tf / lu2.function_unit assert not isinstance(t3, type(lu2)) assert t3 == u.m # For completeness, also ensure non-sensical operations fail with pytest.raises(TypeError): lu1 * object() with pytest.raises(TypeError): slice(None) * lu1 with pytest.raises(TypeError): lu1 / [] with pytest.raises(TypeError): 1 / lu1 @pytest.mark.parametrize('power', (2, 0.5, 1, 0)) def test_raise_to_power(self, power): """Check that raising LogUnits to some power is only possible when the physical unit is dimensionless, and that conversion is turned off when the resulting logarithmic unit (such as mag**2) is incompatible.""" lu1 = u.mag(u.Jy) if power == 0: assert lu1 ** power == u.dimensionless_unscaled elif power == 1: assert lu1 ** power == lu1 else: with pytest.raises(u.UnitsError): lu1 ** power # With dimensionless, though, it works, but returns a normal unit. 
lu2 = u.mag(u.dimensionless_unscaled) t = lu2**power if power == 0: assert t == u.dimensionless_unscaled elif power == 1: assert t == lu2 else: assert not isinstance(t, type(lu2)) assert t == lu2.function_unit**power # also check we roundtrip t2 = t**(1./power) assert t2 == lu2.function_unit with u.set_enabled_equivalencies(u.logarithmic()): assert_allclose(t2.to(u.dimensionless_unscaled, np.arange(3.)), lu2.to(lu2.physical_unit, np.arange(3.))) @pytest.mark.parametrize('other', pu_sample) def test_addition_subtraction_to_normal_units_fails(self, other): lu1 = u.mag(u.Jy) with pytest.raises(u.UnitsError): lu1 + other with pytest.raises(u.UnitsError): lu1 - other with pytest.raises(u.UnitsError): other - lu1 def test_addition_subtraction_to_non_units_fails(self): lu1 = u.mag(u.Jy) with pytest.raises(TypeError): lu1 + 1. with pytest.raises(TypeError): lu1 - [1., 2., 3.] @pytest.mark.parametrize( 'other', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag))) def test_addition_subtraction(self, other): """Check physical units are changed appropriately""" lu1 = u.mag(u.Jy) other_pu = getattr(other, 'physical_unit', u.dimensionless_unscaled) lu_sf = lu1 + other assert lu_sf.is_equivalent(lu1.physical_unit * other_pu) lu_sr = other + lu1 assert lu_sr.is_equivalent(lu1.physical_unit * other_pu) lu_df = lu1 - other assert lu_df.is_equivalent(lu1.physical_unit / other_pu) lu_dr = other - lu1 assert lu_dr.is_equivalent(other_pu / lu1.physical_unit) def test_complicated_addition_subtraction(self): """for fun, a more complicated example of addition and subtraction""" dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2)) lu_dm = u.mag(dm0) lu_absST = u.STmag - lu_dm assert lu_absST.is_equivalent(u.erg/u.s/u.AA) def test_neg_pos(self): lu1 = u.mag(u.Jy) neg_lu = -lu1 assert neg_lu != lu1 assert neg_lu.physical_unit == u.Jy**-1 assert -neg_lu == lu1 pos_lu = +lu1 assert pos_lu is not lu1 assert pos_lu == lu1 def test_pickle(): lu1 = u.dex(u.cm/u.s**2) s = pickle.dumps(lu1) lu2 = pickle.loads(s) assert lu1 == lu2 def test_hashable(): lu1 = u.dB(u.mW) lu2 = u.dB(u.m) lu3 = u.dB(u.mW) assert hash(lu1) != hash(lu2) assert hash(lu1) == hash(lu3) luset = {lu1, lu2, lu3} assert len(luset) == 2 class TestLogQuantityCreation(object): @pytest.mark.parametrize('lq, lu', zip(lq_subclasses + [u.LogQuantity], lu_subclasses + [u.LogUnit])) def test_logarithmic_quantities(self, lq, lu): """Check logarithmic quantities are all set up correctly""" assert lq._unit_class == lu assert type(lu()._quantity_class(1.)) is lq @pytest.mark.parametrize('lq_cls, physical_unit', itertools.product(lq_subclasses, pu_sample)) def test_subclass_creation(self, lq_cls, physical_unit): """Create LogQuantity subclass objects for some physical units, and basic check on transformations""" value = np.arange(1., 10.) 
log_q = lq_cls(value * physical_unit) assert log_q.unit.physical_unit == physical_unit assert log_q.unit.function_unit == log_q.unit._default_function_unit assert_allclose(log_q.physical.value, value) with pytest.raises(ValueError): lq_cls(value, physical_unit) @pytest.mark.parametrize( 'unit', (u.mag, u.mag(), u.mag(u.Jy), u.mag(u.m), u.Unit(2*u.mag), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_different_units(self, unit): q = u.Magnitude(1.23, unit) assert q.unit.function_unit == getattr(unit, 'function_unit', unit) assert q.unit.physical_unit is getattr(unit, 'physical_unit', u.dimensionless_unscaled) @pytest.mark.parametrize('value, unit', ( (1.*u.mag(u.Jy), None), (1.*u.dex(u.Jy), None), (1.*u.mag(u.W/u.m**2/u.Hz), u.mag(u.Jy)), (1.*u.dex(u.W/u.m**2/u.Hz), u.mag(u.Jy)))) def test_function_values(self, value, unit): lq = u.Magnitude(value, unit) assert lq == value assert lq.unit.function_unit == u.mag assert lq.unit.physical_unit == getattr(unit, 'physical_unit', value.unit.physical_unit) @pytest.mark.parametrize( 'unit', (u.mag(), u.mag(u.Jy), u.mag(u.m), u.MagUnit('', 2.*u.mag), u.MagUnit(u.Jy, -1*u.mag), u.MagUnit(u.m, -2.*u.mag))) def test_indirect_creation(self, unit): q1 = 2.5 * unit assert isinstance(q1, u.Magnitude) assert q1.value == 2.5 assert q1.unit == unit pv = 100. * unit.physical_unit q2 = unit * pv assert q2.unit == unit assert q2.unit.physical_unit == pv.unit assert q2.to_value(unit.physical_unit) == 100. assert (q2._function_view / u.mag).to_value(1) == -5. q3 = unit / 0.4 assert q3 == q1 def test_from_view(self): # Cannot view a physical quantity as a function quantity, since the # values would change. q = [100., 1000.] * u.cm/u.s**2 with pytest.raises(TypeError): q.view(u.Dex) # But fine if we have the right magnitude. q = [2., 3.] * u.dex lq = q.view(u.Dex) assert isinstance(lq, u.Dex) assert lq.unit.physical_unit == u.dimensionless_unscaled assert np.all(q == lq) def test_using_quantity_class(self): """Check that we can use Quantity if we have subok=True""" # following issue #5851 lu = u.dex(u.AA) with pytest.raises(u.UnitTypeError): u.Quantity(1., lu) q = u.Quantity(1., lu, subok=True) assert type(q) is lu._quantity_class def test_conversion_to_and_from_physical_quantities(): """Ensures we can convert from regular quantities.""" mst = [10., 12., 14.] * u.STmag flux_lambda = mst.physical mst_roundtrip = flux_lambda.to(u.STmag) # check we return a logquantity; see #5178. assert isinstance(mst_roundtrip, u.Magnitude) assert mst_roundtrip.unit == mst.unit assert_allclose(mst_roundtrip.value, mst.value) wave = [4956.8, 4959.55, 4962.3] * u.AA flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave)) mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave)) assert isinstance(mst_roundtrip2, u.Magnitude) assert mst_roundtrip2.unit == mst.unit assert_allclose(mst_roundtrip2.value, mst.value) def test_quantity_decomposition(): lq = 10.*u.mag(u.Jy) assert lq.decompose() == lq assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s] assert lq.si == lq assert lq.si.unit.physical_unit.bases == [u.kg, u.s] assert lq.cgs == lq assert lq.cgs.unit.physical_unit.bases == [u.g, u.s] class TestLogQuantityViews(object): def setup(self): self.lq = u.Magnitude(np.arange(10.) * u.Jy) self.lq2 = u.Magnitude(np.arange(5.)) def test_value_view(self): lq_value = self.lq.value assert type(lq_value) is np.ndarray lq_value[2] = -1. 
assert np.all(self.lq.value == lq_value) def test_function_view(self): lq_fv = self.lq._function_view assert type(lq_fv) is u.Quantity assert lq_fv.unit is self.lq.unit.function_unit lq_fv[3] = -2. * lq_fv.unit assert
np.all(self.lq.value == lq_fv.value)
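# --- Added illustrative sketch (not part of the original test file) ---
# The view tests above rely on the basic LogQuantity round trip: wrapping a
# physical quantity in a logarithmic unit and recovering it via .physical.
# A minimal, standalone usage of the public API exercised by these tests:
import numpy as np
from astropy import units as u

flux = 2.5 * u.Jy
mag_flux = u.Magnitude(flux)  # logarithmic quantity with unit mag(Jy)
assert mag_flux.unit.physical_unit == u.Jy
np.testing.assert_allclose(mag_flux.physical.value, flux.value)

dex_g = u.Dex(100. * u.cm / u.s**2)  # value is log10(100) = 2, unit dex(cm / s2)
np.testing.assert_allclose(dex_g.value, 2.)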
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_refit_callable(): """ Test refit=callable, which adds flexibility in identifying the "best" estimator. """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_score`. """ # Fit a dummy clf with `refit=True` to get a list of keys in # clf.cv_results_. X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=True) clf.fit(X, y) # Ensure that `best_index_ != 0` for this dummy clf assert clf.best_index_ != 0 # Assert every key matches those in `cv_results` for key in clf.cv_results_.keys(): assert key in cv_results return cv_results['mean_test_score'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring='precision', refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_refit_callable_invalid_type(): """ Test implementation catches the errors when 'best_index_' returns an invalid result. """ def refit_callable_invalid_type(cv_results): """ A dummy function tests when returned 'best_index_' is not integer. """ return None X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_invalid_type) with pytest.raises(TypeError, match='best_index_ returned is not an integer'): clf.fit(X, y) @pytest.mark.parametrize('out_bound_value', [-1, 2]) @pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV]) def test_refit_callable_out_bound(out_bound_value, search_cv): """ Test implementation catches the errors when 'best_index_' returns an out of bound result. """ def refit_callable_out_bound(cv_results): """ A dummy function tests when returned 'best_index_' is out of bounds. """ return out_bound_value X, y = make_classification(n_samples=100, n_features=4, random_state=42) clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]}, scoring='precision', refit=refit_callable_out_bound) with pytest.raises(IndexError, match='best_index_ index out of range'): clf.fit(X, y) def test_refit_callable_multi_metric(): """ Test refit=callable in multiple metric evaluation setting """ def refit_callable(cv_results): """ A dummy function tests `refit=callable` interface. Return the index of a model that has the least `mean_test_prec`. 
""" assert 'mean_test_prec' in cv_results return cv_results['mean_test_prec'].argmin() X, y = make_classification(n_samples=100, n_features=4, random_state=42) scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'} clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]}, scoring=scoring, refit=refit_callable) clf.fit(X, y) assert clf.best_index_ == 0 # Ensure `best_score_` is disabled when using `refit=callable` assert not hasattr(clf, 'best_score_') def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier( check_X=check_X, check_y=check_y, methods_to_check=["fit"], ) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert hasattr(grid_search, "cv_results_") def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_X=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert hasattr(grid_search, "cv_results_") def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier( check_y=lambda x: isinstance(x, list), methods_to_check=["fit"], ) cv = KFold(n_splits=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) assert hasattr(grid_search, "cv_results_") @ignore_warnings def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) def check_df(x): return isinstance(x, InputFeatureType) def check_series(x): return isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert hasattr(grid_search, "cv_results_") def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(n_samples=50, random_state=0) km = KMeans(random_state=0, init="random", n_init=1) # Multi-metric evaluation unsupervised scoring = ['adjusted_rand_score', 'fowlkes_mallows_score'] for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']: grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring=scoring, refit=refit) grid_search.fit(X, y) # Both ARI and FMS can find the right number :) assert grid_search.best_params_["n_clusters"] == 3 # Single metric evaluation unsupervised grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='fowlkes_mallows_score') grid_search.fit(X, y) assert grid_search.best_params_["n_clusters"] == 3 # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert grid_search.best_params_["n_clusters"] == 4 def test_gridsearch_no_predict(): # test grid-search with an estimator 
without predict. # slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert search.best_params_['bandwidth'] == .1 assert search.best_score_ == 42 def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert len(samples) == 10 for sample in samples: assert sample["kernel"] in ["rbf", "linear"] assert 0 <= sample["C"] <= 1 # test that repeated calls yield identical parameters param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=3, random_state=0) assert [x for x in sampler] == [x for x in sampler] if sp_version >= (0, 16): param_distributions = {"C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) assert [x for x in sampler] == [x for x in sampler] def check_cv_results_array_types(search, param_keys, score_keys): # Check if the search `cv_results`'s array are of correct types cv_results = search.cv_results_ assert all(isinstance(cv_results[param], np.ma.MaskedArray) for param in param_keys) assert all(cv_results[key].dtype == object for key in param_keys) assert not any(isinstance(cv_results[key], np.ma.MaskedArray) for key in score_keys) assert all(cv_results[key].dtype == np.float64 for key in score_keys if not key.startswith('rank')) scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score'] for key in scorer_keys: assert cv_results['rank_test_%s' % key].dtype == np.int32 def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand): # Test the search.cv_results_ contains all the required results assert_array_equal(sorted(cv_results.keys()), sorted(param_keys + score_keys + ('params',))) assert all(cv_results[key].shape == (n_cand,) for key in param_keys + score_keys) def test_grid_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_grid_points = 6 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_candidates = n_grid_points search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check if score and timing are reasonable assert all(cv_results['rank_test_score'] >= 1) assert (all(cv_results[k] >= 0) for k in score_keys if k != 'rank_test_score') assert (all(cv_results[k] <= 1) for k in score_keys if 'time' not in k and k != 'rank_test_score') # Check cv_results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates) # Check masking cv_results = search.cv_results_ n_candidates = 
len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') def test_random_search_cv_results(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 params = [{'kernel': ['rbf'], 'C': expon(scale=10), 'gamma': expon(scale=0.1)}, {'kernel': ['poly'], 'degree': [2, 3]}] param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel') score_keys = ('mean_test_score', 'mean_train_score', 'rank_test_score', 'split0_test_score', 'split1_test_score', 'split2_test_score', 'split0_train_score', 'split1_train_score', 'split2_train_score', 'std_test_score', 'std_train_score', 'mean_fit_time', 'std_fit_time', 'mean_score_time', 'std_score_time') n_cand = n_search_iter search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits, param_distributions=params, return_train_score=True) search.fit(X, y) cv_results = search.cv_results_ # Check results structure check_cv_results_array_types(search, param_keys, score_keys) check_cv_results_keys(cv_results, param_keys, score_keys, n_cand) n_candidates = len(search.cv_results_['params']) assert all((cv_results['param_C'].mask[i] and cv_results['param_gamma'].mask[i] and not cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'linear') assert all((not cv_results['param_C'].mask[i] and not cv_results['param_gamma'].mask[i] and cv_results['param_degree'].mask[i]) for i in range(n_candidates) if cv_results['param_kernel'][i] == 'rbf') @pytest.mark.parametrize( "SearchCV, specialized_params", [(GridSearchCV, {'param_grid': {'C': [1, 10]}}), (RandomizedSearchCV, {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})] ) def test_search_default_iid(SearchCV, specialized_params): # Test the IID parameter TODO: Clearly this test does something else??? # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] common_params = {'estimator': SVC(), 'cv': cv, 'return_train_score': True} search = SearchCV(**common_params, **specialized_params) search.fit(X, y) test_cv_scores = np.array( [search.cv_results_['split%d_test_score' % s][0] for s in range(search.n_splits_)] ) test_mean = search.cv_results_['mean_test_score'][0] test_std = search.cv_results_['std_test_score'][0] train_cv_scores = np.array( [search.cv_results_['split%d_train_score' % s][0] for s in range(search.n_splits_)] ) train_mean = search.cv_results_['mean_train_score'][0] train_std = search.cv_results_['std_train_score'][0] assert search.cv_results_['param_C'][0] == 1 # scores are the same as above assert_allclose(test_cv_scores, [1, 1. 
/ 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) for refit in (True, False): random_searches = [] for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'): # If True, for multi-metric pass refit='accuracy' if refit: probability = True refit = 'accuracy' if isinstance(scoring, tuple) else refit else: probability = False clf = SVC(probability=probability, random_state=42) random_search = RandomizedSearchCV(clf, n_iter=n_search_iter, cv=n_splits, param_distributions=params, scoring=scoring, refit=refit, random_state=0) random_search.fit(X, y) random_searches.append(random_search) compare_cv_results_multimetric_with_single(*random_searches) compare_refit_methods_when_refit_with_acc( random_searches[0], random_searches[1], refit) def compare_cv_results_multimetric_with_single( search_multi, search_acc, search_rec): """Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search""" assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) cv_results_multi = search_multi.cv_results_ cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v for k, v in search_acc.cv_results_.items()} cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v for k, v in search_rec.cv_results_.items()}) # Check if score and timing are reasonable, also checks if the keys # are present assert all((
completion: np.all(cv_results_multi[k] <= 1)
api: numpy.all
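For context on this row's masked call, here is a minimal, self-contained sketch of the bounds check that np.all performs in the completion. The cv_results_multi dict below is a made-up stand-in for the search.cv_results_ mapping that GridSearchCV builds during fit, not the object from the truncated test above.

import numpy as np

# Made-up stand-in for search.cv_results_; the real mapping is built by
# GridSearchCV/RandomizedSearchCV during fit, with mean_test_<scorer> keys.
cv_results_multi = {
    'mean_test_accuracy': np.array([0.92, 0.88, 0.95]),
    'mean_test_recall': np.array([0.90, 0.85, 0.93]),
}

# np.all reduces a whole array to a single bool, so one generator inside
# the built-in all(...) bounds-checks every per-candidate score array.
assert all(np.all(cv_results_multi[k] <= 1) for k in cv_results_multi)
assert all(np.all(cv_results_multi[k] >= 0) for k in cv_results_multi)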
# pylint: disable=protected-access """ Test the wrappers for the C API. """ import os from contextlib import contextmanager import numpy as np import numpy.testing as npt import pandas as pd import pytest import xarray as xr from packaging.version import Version from pygmt import Figure, clib from pygmt.clib.conversion import dataarray_to_matrix from pygmt.clib.session import FAMILIES, VIAS from pygmt.exceptions import ( GMTCLibError, GMTCLibNoSessionError, GMTInvalidInput, GMTVersionError, ) from pygmt.helpers import GMTTempFile TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data") with clib.Session() as _lib: gmt_version = Version(_lib.info["version"]) @contextmanager def mock(session, func, returns=None, mock_func=None): """ Mock a GMT C API function to make it always return a given value. Used to test that exceptions are raised when API functions fail by producing a NULL pointer as output or non-zero status codes. Needed because it's not easy to get some API functions to fail without inducing a Segmentation Fault (which is a good thing because libgmt usually only fails with errors). """ if mock_func is None: def mock_api_function(*args): # pylint: disable=unused-argument """ A mock GMT API function that always returns a given value. """ return returns mock_func = mock_api_function get_libgmt_func = session.get_libgmt_func def mock_get_libgmt_func(name, argtypes=None, restype=None): """ Return our mock function. """ if name == func: return mock_func return get_libgmt_func(name, argtypes, restype) setattr(session, "get_libgmt_func", mock_get_libgmt_func) yield setattr(session, "get_libgmt_func", get_libgmt_func) def test_getitem(): """ Test that I can get correct constants from the C lib. """ ses = clib.Session() assert ses["GMT_SESSION_EXTERNAL"] != -99999 assert ses["GMT_MODULE_CMD"] != -99999 assert ses["GMT_PAD_DEFAULT"] != -99999 assert ses["GMT_DOUBLE"] != -99999 with pytest.raises(GMTCLibError): ses["A_WHOLE_LOT_OF_JUNK"] # pylint: disable=pointless-statement def test_create_destroy_session(): """ Test that create and destroy session are called without errors. """ # Create two session and make sure they are not pointing to the same memory session1 = clib.Session() session1.create(name="test_session1") assert session1.session_pointer is not None session2 = clib.Session() session2.create(name="test_session2") assert session2.session_pointer is not None assert session2.session_pointer != session1.session_pointer session1.destroy() session2.destroy() # Create and destroy a session twice ses = clib.Session() for __ in range(2): with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement ses.create("session1") assert ses.session_pointer is not None ses.destroy() with pytest.raises(GMTCLibNoSessionError): ses.session_pointer # pylint: disable=pointless-statement def test_create_session_fails(): """ Check that an exception is raised when failing to create a session. """ ses = clib.Session() with mock(ses, "GMT_Create_Session", returns=None): with pytest.raises(GMTCLibError): ses.create("test-session-name") # Should fail if trying to create a session before destroying the old one. ses.create("test1") with pytest.raises(GMTCLibError): ses.create("test2") def test_destroy_session_fails(): """ Fail to destroy session when given bad input. 
""" ses = clib.Session() with pytest.raises(GMTCLibNoSessionError): ses.destroy() ses.create("test-session") with mock(ses, "GMT_Destroy_Session", returns=1): with pytest.raises(GMTCLibError): ses.destroy() ses.destroy() def test_call_module(): """ Run a command to see if call_module works. """ data_fname = os.path.join(TEST_DATA_DIR, "points.txt") out_fname = "test_call_module.txt" with clib.Session() as lib: with GMTTempFile() as out_fname: lib.call_module("info", "{} -C ->{}".format(data_fname, out_fname.name)) assert os.path.exists(out_fname.name) output = out_fname.read().strip() assert output == "11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338" def test_call_module_invalid_arguments(): """ Fails for invalid module arguments. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("info", "bogus-data.bla") def test_call_module_invalid_name(): """ Fails when given bad input. """ with clib.Session() as lib: with pytest.raises(GMTCLibError): lib.call_module("meh", "") def test_call_module_error_message(): """ Check is the GMT error message was captured. """ with clib.Session() as lib: try: lib.call_module("info", "bogus-data.bla") except GMTCLibError as error: assert "Module 'info' failed with status code" in str(error) assert "gmtinfo [ERROR]: Cannot find file bogus-data.bla" in str(error) def test_method_no_session(): """ Fails when not in a session. """ # Create an instance of Session without "with" so no session is created. lib = clib.Session() with pytest.raises(GMTCLibNoSessionError): lib.call_module("gmtdefaults", "") with pytest.raises(GMTCLibNoSessionError): lib.session_pointer # pylint: disable=pointless-statement def test_parse_constant_single(): """ Parsing a single family argument correctly. """ lib = clib.Session() for family in FAMILIES: parsed = lib._parse_constant(family, valid=FAMILIES) assert parsed == lib[family] def test_parse_constant_composite(): """ Parsing a composite constant argument (separated by |) correctly. """ lib = clib.Session() test_cases = ((family, via) for family in FAMILIES for via in VIAS) for family, via in test_cases: composite = "|".join([family, via]) expected = lib[family] + lib[via] parsed = lib._parse_constant(composite, valid=FAMILIES, valid_modifiers=VIAS) assert parsed == expected def test_parse_constant_fails(): """ Check if the function fails when given bad input. """ lib = clib.Session() test_cases = [ "SOME_random_STRING", "GMT_IS_DATASET|GMT_VIA_MATRIX|GMT_VIA_VECTOR", "GMT_IS_DATASET|NOT_A_PROPER_VIA", "NOT_A_PROPER_FAMILY|GMT_VIA_MATRIX", "NOT_A_PROPER_FAMILY|ALSO_INVALID", ] for test_case in test_cases: with pytest.raises(GMTInvalidInput): lib._parse_constant(test_case, valid=FAMILIES, valid_modifiers=VIAS) # Should also fail if not given valid modifiers but is using them anyway. # This should work... lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=VIAS ) # But this shouldn't. with pytest.raises(GMTInvalidInput): lib._parse_constant( "GMT_IS_DATASET|GMT_VIA_MATRIX", valid=FAMILIES, valid_modifiers=None ) def test_create_data_dataset(): """ Run the function to make sure it doesn't fail badly. 
""" with clib.Session() as lib: # Dataset from vectors data_vector = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_VECTOR", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], # columns, rows, layers, dtype ) # Dataset from matrices data_matrix = lib.create_data( family="GMT_IS_DATASET|GMT_VIA_MATRIX", geometry="GMT_IS_POINT", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) assert data_vector != data_matrix def test_create_data_grid_dim(): """ Create a grid ignoring range and inc. """ with clib.Session() as lib: # Grids from matrices using dim lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[10, 20, 1, 0], ) def test_create_data_grid_range(): """ Create a grid specifying range and inc instead of dim. """ with clib.Session() as lib: # Grids from matrices using range and int lib.create_data( family="GMT_IS_GRID|GMT_VIA_MATRIX", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) def test_create_data_fails(): """ Check that create_data raises exceptions for invalid input and output. """ # Passing in invalid mode with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="Not_a_valid_mode", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # Passing in invalid geometry with pytest.raises(GMTInvalidInput): with clib.Session() as lib: lib.create_data( family="GMT_IS_GRID", geometry="Not_a_valid_geometry", mode="GMT_CONTAINER_ONLY", dim=[0, 0, 1, 0], ranges=[150.0, 250.0, -20.0, 20.0], inc=[0.1, 0.2], ) # If the data pointer returned is None (NULL pointer) with pytest.raises(GMTCLibError): with clib.Session() as lib: with mock(lib, "GMT_Create_Data", returns=None): lib.create_data( family="GMT_IS_DATASET", geometry="GMT_IS_SURFACE", mode="GMT_CONTAINER_ONLY", dim=[11, 10, 2, 0], ) def test_virtual_file(): """ Test passing in data via a virtual file with a Dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (5, 3) for dtype in dtypes: with clib.Session() as lib: family = "GMT_IS_DATASET|GMT_VIA_MATRIX" geometry = "GMT_IS_POINT" dataset = lib.create_data( family=family, geometry=geometry, mode="GMT_CONTAINER_ONLY", dim=[shape[1], shape[0], 1, 0], # columns, rows, layers, dtype ) data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) lib.put_matrix(dataset, matrix=data) # Add the dataset to a virtual file and pass it along to gmt info vfargs = (family, geometry, "GMT_IN|GMT_IS_REFERENCE", dataset) with lib.open_virtual_file(*vfargs) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtual_file_fails(): """ Check that opening and closing virtual files raises an exception for non- zero return codes. """ vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IN|GMT_IS_REFERENCE", None, ) # Mock Open_VirtualFile to test the status check when entering the context. # If the exception is raised, the code won't get to the closing of the # virtual file. 
with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=1): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): print("Should not get to this code") # Test the status check when closing the virtual file # Mock the opening to return 0 (success) so that we don't open a file that # we won't close later. with clib.Session() as lib, mock(lib, "GMT_Open_VirtualFile", returns=0), mock( lib, "GMT_Close_VirtualFile", returns=1 ): with pytest.raises(GMTCLibError): with lib.open_virtual_file(*vfargs): pass print("Shouldn't get to this code either") def test_virtual_file_bad_direction(): """ Test passing an invalid direction argument. """ with clib.Session() as lib: vfargs = ( "GMT_IS_DATASET|GMT_VIA_MATRIX", "GMT_IS_POINT", "GMT_IS_GRID", # The invalid direction argument 0, ) with pytest.raises(GMTInvalidInput): with lib.open_virtual_file(*vfargs): print("This should have failed") def test_virtualfile_from_vectors(): """ Test the automation for transforming vectors to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() size = 10 for dtype in dtypes: x = np.arange(size, dtype=dtype) y = np.arange(size, size * 2, 1, dtype=dtype) z = np.arange(size * 2, size * 3, 1, dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, z) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(i.min(), i.max()) for i in (x, y, z)] ) expected = "<vector memory>: N = {}\t{}\n".format(size, bounds) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_one_string_or_object_column(dtype): """ Test passing in one column with string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings = np.array(["a", "bc", "defg", "hijklmn", "opqrst"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join(f"{i}\t{j}\t{k}\n" for i, j, k in zip(x, y, strings)) assert output == expected @pytest.mark.parametrize("dtype", [str, object]) def test_virtualfile_from_vectors_two_string_or_object_columns(dtype): """ Test passing in two columns of string or object dtype into virtual file dataset. """ size = 5 x = np.arange(size, dtype=np.int32) y = np.arange(size, size * 2, 1, dtype=np.int32) strings1 = np.array(["a", "bc", "def", "ghij", "klmno"], dtype=dtype) strings2 = np.array(["pqrst", "uvwx", "yz!", "@#", "$"], dtype=dtype) with clib.Session() as lib: with lib.virtualfile_from_vectors(x, y, strings1, strings2) as vfile: with GMTTempFile() as outfile: lib.call_module("convert", f"{vfile} ->{outfile.name}") output = outfile.read(keep_tabs=True) expected = "".join( f"{h}\t{i}\t{j} {k}\n" for h, i, j, k in zip(x, y, strings1, strings2) ) assert output == expected def test_virtualfile_from_vectors_transpose(): """ Test transforming matrix columns to virtual file dataset. 
""" dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_vectors(*data.T) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} -C ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["{:.0f}\t{:.0f}".format(col.min(), col.max()) for col in data.T] ) expected = "{}\n".format(bounds) assert output == expected def test_virtualfile_from_vectors_diff_size(): """ Test the function fails for arrays of different sizes. """ x = np.arange(5) y = np.arange(6) with clib.Session() as lib: with pytest.raises(GMTInvalidInput): with lib.virtualfile_from_vectors(x, y): print("This should have failed") def test_virtualfile_from_matrix(): """ Test transforming a matrix to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (7, 5) for dtype in dtypes: data = np.arange(shape[0] * shape[1], dtype=dtype).reshape(shape) with clib.Session() as lib: with lib.virtualfile_from_matrix(data) as vfile: with GMTTempFile() as outfile: lib.call_module("info", "{} ->{}".format(vfile, outfile.name)) output = outfile.read(keep_tabs=True) bounds = "\t".join( ["<{:.0f}/{:.0f}>".format(col.min(), col.max()) for col in data.T] ) expected = "<matrix memory>: N = {}\t{}\n".format(shape[0], bounds) assert output == expected def test_virtualfile_from_matrix_slice(): """ Test transforming a slice of a larger array to virtual file dataset. """ dtypes = "float32 float64 int32 int64 uint32 uint64".split() shape = (10, 6) for dtype in dtypes: full_data =
completion: np.arange(shape[0] * shape[1], dtype=dtype)
api: numpy.arange
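As with the previous row, a short sketch of the pattern behind this completion: np.arange builds a flat ramp of values, reshape views it as a matrix, and a basic slice of that matrix stays a view, which is what a matrix-slice virtual-file test needs to exercise. The shape matches the truncated test above; the slice bounds are illustrative assumptions, since the test body is cut off before they appear.

import numpy as np

shape = (10, 6)  # rows, columns, as in the truncated test
# Deterministic test matrix: count 0..59 and view the flat ramp as 10x6.
full_data = np.arange(shape[0] * shape[1], dtype="float64").reshape(shape)

# Basic slicing returns a view that shares memory with the parent array;
# the bounds here are illustrative, not taken from the test.
data = full_data[:5, :3]
assert np.shares_memory(data, full_data)
assert data.shape == (5, 3)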
"""Test the search module""" from collections.abc import Iterable, Sized from io import StringIO from itertools import chain, product from functools import partial import pickle import sys from types import GeneratorType import re import numpy as np import scipy.sparse as sp import pytest from sklearn.utils.fixes import sp_version from sklearn.utils._testing import assert_raises from sklearn.utils._testing import assert_warns from sklearn.utils._testing import assert_warns_message from sklearn.utils._testing import assert_raise_message from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from sklearn.utils._testing import assert_almost_equal from sklearn.utils._testing import ignore_warnings from sklearn.utils._mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.base import clone from sklearn.exceptions import NotFittedError from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.model_selection import fit_grid_point from sklearn.model_selection import train_test_split from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import StratifiedShuffleSplit from sklearn.model_selection import LeaveOneGroupOut from sklearn.model_selection import LeavePGroupsOut from sklearn.model_selection import GroupKFold from sklearn.model_selection import GroupShuffleSplit from sklearn.model_selection import GridSearchCV from sklearn.model_selection import RandomizedSearchCV from sklearn.model_selection import ParameterGrid from sklearn.model_selection import ParameterSampler from sklearn.model_selection._search import BaseSearchCV from sklearn.model_selection._validation import FitFailedWarning from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.neighbors import KNeighborsClassifier from sklearn.metrics import f1_score from sklearn.metrics import recall_score from sklearn.metrics import accuracy_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.metrics.pairwise import euclidean_distances from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.linear_model import Ridge, SGDClassifier, LinearRegression from sklearn.experimental import enable_hist_gradient_boosting # noqa from sklearn.ensemble import HistGradientBoostingClassifier from sklearn.model_selection.tests.common import OneTimeSplitter # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. class MockClassifier: """Dummy classifier to test the parameter search algorithms""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert len(X) == len(Y) self.classes_ = np.unique(Y) return self def predict(self, T): return T.shape[0] def transform(self, X): return X + self.foo_param def inverse_transform(self, X): return X - self.foo_param predict_proba = predict predict_log_proba = predict decision_function = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. 
return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert list(grid) == [grid[i] for i in range(len(grid))] @pytest.mark.parametrize("klass", [ParameterGrid, partial(ParameterSampler, n_iter=10)]) @pytest.mark.parametrize( "input, error_type, error_message", [(0, TypeError, r'Parameter .* is not a dict or a list \(0\)'), ([{'foo': [0]}, 0], TypeError, r'Parameter .* is not a dict \(0\)'), ({'foo': 0}, TypeError, "Parameter.* value is not iterable .*" r"\(key='foo', value=0\)")] ) def test_validate_parameter_input(klass, input, error_type, error_message): with pytest.raises(error_type, match=error_message): klass(input) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert isinstance(grid1, Iterable) assert isinstance(grid1, Sized) assert len(grid1) == 3 assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert len(grid2) == 6 # loop to assert we can iterate over the grid multiple times for i in range(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert (points == set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert len(empty) == 1 assert list(empty) == [{}] assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert len(has_empty) == 4 assert list(has_empty) == [{'C': 1}, {'C': 10}, {}, {'C': .5}] assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=3, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert grid_search.best_estimator_.foo_param == 2 assert_array_equal(grid_search.cv_results_["param_foo_param"].data, [1, 2, 3]) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) def test_grid_search_pipeline_steps(): # check that parameters that are estimators are cloned before fitting pipe = Pipeline([('regressor', LinearRegression())]) param_grid = {'regressor': [LinearRegression(), Ridge()]} grid_search = GridSearchCV(pipe, param_grid, cv=2) grid_search.fit(X, y) regressor_results = grid_search.cv_results_['param_regressor'] assert isinstance(regressor_results[0], LinearRegression) assert isinstance(regressor_results[1], Ridge) assert not hasattr(regressor_results[0], 'coef_') assert not hasattr(regressor_results[1], 'coef_') assert regressor_results[0] is not grid_search.best_estimator_ assert regressor_results[1] is not 
grid_search.best_estimator_ # check that we didn't modify the parameter grid that was passed assert not hasattr(param_grid['regressor'][0], 'coef_') assert not hasattr(param_grid['regressor'][1], 'coef_') @pytest.mark.parametrize("SearchCV", [GridSearchCV, RandomizedSearchCV]) def test_SearchCV_with_fit_params(SearchCV): X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(expected_fit_params=['spam', 'eggs']) searcher = SearchCV( clf, {'foo_param': [1, 2, 3]}, cv=2, error_score="raise" ) # The CheckingClassifier generates an assertion error if # a parameter is missing or has length != len(X). err_msg = r"Expected fit parameter\(s\) \['eggs'\] not seen." with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(10)) err_msg = "Fit parameter spam has length 1; expected" with pytest.raises(AssertionError, match=err_msg): searcher.fit(X, y, spam=np.ones(1), eggs=np.zeros(10)) searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10)) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert grid_search_no_score.best_params_ == grid_search.best_params_ # check that we can call score and that it gives the correct result assert grid_search.score(X, y) == grid_search_no_score.score(X, y) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc' ).fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = search_no_scoring.score(X, y) score_accuracy = search_accuracy.score(X, y) score_no_score_auc = search_no_score_method_auc.score(X, y) score_auc = search_auc.score(X, y) # ensure the test is sane assert score_auc < 1.0 assert score_accuracy < 1.0 assert score_auc != score_accuracy assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_grid_search_groups(): # Check if ValueError (when groups is None) propagates to GridSearchCV # And also check if groups is correctly passed to the cv object rng = np.random.RandomState(0) X, y = make_classification(n_samples=15, n_classes=2, random_state=0) groups = rng.randint(0, 3, 15) clf = LinearSVC(random_state=0) grid = {'C': [1]} group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(n_splits=3), GroupShuffleSplit()] for cv in group_cvs: gs = GridSearchCV(clf, grid, cv=cv) assert_raise_message(ValueError, "The 'groups' parameter should not be None.", gs.fit, X, y) gs.fit(X, y, groups=groups) non_group_cvs 
= [StratifiedKFold(), StratifiedShuffleSplit()] for cv in non_group_cvs: gs = GridSearchCV(clf, grid, cv=cv) # Should not raise an error gs.fit(X, y) def test_classes__property(): # Test that classes_ property matches best_estimator_.classes_ X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) Cs = [.1, 1, 10] grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) grid_search.fit(X, y) assert_array_equal(grid_search.best_estimator_.classes_, grid_search.classes_) # Test that regressors do not have a classes_ attribute grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]}) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute before it's fit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}) assert not hasattr(grid_search, 'classes_') # Test that the grid searcher has no classes_ attribute without a refit grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs}, refit=False) grid_search.fit(X, y) assert not hasattr(grid_search, 'classes_') def test_trivial_cv_results_attr(): # Test search over a "grid" with only one point. clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}, cv=3) grid_search.fit(X, y) assert hasattr(grid_search, "cv_results_") random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1, cv=3) random_search.fit(X, y) assert hasattr(grid_search, "cv_results_") def test_no_refit(): # Test that GSCV can be used for model selection alone without refitting clf = MockClassifier() for scoring in [None, ['accuracy', 'precision']]: grid_search = GridSearchCV( clf, {'foo_param': [1, 2, 3]}, refit=False, cv=3 ) grid_search.fit(X, y) assert not hasattr(grid_search, "best_estimator_") and \ hasattr(grid_search, "best_index_") and \ hasattr(grid_search, "best_params_") # Make sure the functions predict/transform etc raise meaningful # error messages for fn_name in ('predict', 'predict_proba', 'predict_log_proba', 'transform', 'inverse_transform'): assert_raise_message(NotFittedError, ('refit=False. 
%s is available only after ' 'refitting on the best parameters' % fn_name), getattr(grid_search, fn_name), X) # Test that an invalid refit param raises appropriate error messages for refit in ["", 5, True, 'recall', 'accuracy']: assert_raise_message(ValueError, "For multi-metric scoring, the " "parameter refit must be set to a scorer key", GridSearchCV(clf, {}, refit=refit, scoring={'acc': 'accuracy', 'prec': 'precision'} ).fit, X, y) def test_grid_search_error(): # Test that grid search will capture errors on data with different length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC(gamma='auto') cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_when_param_grid_includes_range(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = None grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)}, cv=3) grid_search.fit(X, y) assert grid_search.best_estimator_.foo_param == 2 def test_grid_search_bad_param_grid(): param_dict = {"C": 1} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'int'>)." " Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raise_message( ValueError, "Parameter values for parameter (C) need to be a non-empty sequence.", GridSearchCV, clf, param_dict) param_dict = {"C": "1,2,3"} clf = SVC(gamma='auto') assert_raise_message( ValueError, "Parameter grid for parameter (C) needs to" " be a list or numpy array, but got (<class 'str'>)." 
" Single values need to be wrapped in a list" " with one element.", GridSearchCV, clf, param_dict) param_dict = {"C": np.ones((3, 2))} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert np.mean(y_pred == y_pred2) >= .9 assert C == C2 def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert C == C2 # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert C == C3 assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert cv.best_score_ >= 0 # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert np.mean(y_pred == y_test) >= 0 # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert not hasattr(self, 'has_been_fit_') self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. 
X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                       scoring="precision", refit=True)
    clf.fit(X, y)


def test_refit_callable():
    """
    Test refit=callable, which adds flexibility in identifying the
    "best" estimator.
    """
    def refit_callable(cv_results):
        """
        A dummy function to test the `refit=callable` interface.
        Return the index of the model that has the smallest
        `mean_test_score`.
        """
        # Fit a dummy clf with `refit=True` to get a list of keys in
        # clf.cv_results_.
        X, y = make_classification(n_samples=100, n_features=4,
                                   random_state=42)
        clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
                           scoring='precision', refit=True)
        clf.fit(X, y)
        # Ensure that `best_index_ != 0` for this dummy clf
        assert clf.best_index_ != 0
        # Assert every key matches those in `cv_results`
        for key in clf.cv_results_.keys():
            assert key in cv_results

        return cv_results['mean_test_score'].argmin()

    X, y = make_classification(n_samples=100, n_features=4,
                               random_state=42)
    clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
                       scoring='precision', refit=refit_callable)
    clf.fit(X, y)

    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, 'best_score_')


def test_refit_callable_invalid_type():
    """
    Test that the implementation catches the error when the returned
    'best_index_' is not an integer.
    """
    def refit_callable_invalid_type(cv_results):
        """
        A dummy function that returns a 'best_index_' which is not an
        integer.
        """
        return None

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)

    clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.1, 1]},
                       scoring='precision',
                       refit=refit_callable_invalid_type)
    with pytest.raises(TypeError,
                       match='best_index_ returned is not an integer'):
        clf.fit(X, y)


@pytest.mark.parametrize('out_bound_value', [-1, 2])
@pytest.mark.parametrize('search_cv', [RandomizedSearchCV, GridSearchCV])
def test_refit_callable_out_bound(out_bound_value, search_cv):
    """
    Test that the implementation catches the error when the returned
    'best_index_' is out of bounds.
    """
    def refit_callable_out_bound(cv_results):
        """
        A dummy function that returns a 'best_index_' which is out of
        bounds.
        """
        return out_bound_value

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)

    clf = search_cv(LinearSVC(random_state=42), {'C': [0.1, 1]},
                    scoring='precision', refit=refit_callable_out_bound)
    with pytest.raises(IndexError, match='best_index_ index out of range'):
        clf.fit(X, y)


def test_refit_callable_multi_metric():
    """
    Test refit=callable in the multiple-metric evaluation setting.
    """
    def refit_callable(cv_results):
        """
        A dummy function to test the `refit=callable` interface.
        Return the index of the model that has the smallest
        `mean_test_prec`.
        """
        assert 'mean_test_prec' in cv_results
        return cv_results['mean_test_prec'].argmin()

    X, y = make_classification(n_samples=100, n_features=4, random_state=42)
    scoring = {'Accuracy': make_scorer(accuracy_score), 'prec': 'precision'}
    clf = GridSearchCV(LinearSVC(random_state=42), {'C': [0.01, 0.1, 1]},
                       scoring=scoring, refit=refit_callable)
    clf.fit(X, y)

    assert clf.best_index_ == 0
    # Ensure `best_score_` is disabled when using `refit=callable`
    assert not hasattr(clf, 'best_score_')


def test_gridsearch_nd():
    # Pass X as a 4-d array and y as a 3-d array to GridSearchCV
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    check_X = lambda x: x.shape[1:] == (5, 3, 2)
    check_y = lambda x: x.shape[1:] == (7, 11)
    clf = CheckingClassifier(
        check_X=check_X, check_y=check_y, methods_to_check=["fit"],
    )
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert hasattr(grid_search, "cv_results_")


def test_X_as_list():
    # Pass X as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(
        check_X=lambda x: isinstance(x, list), methods_to_check=["fit"],
    )
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X.tolist(), y).score(X, y)
    assert hasattr(grid_search, "cv_results_")


def test_y_as_list():
    # Pass y as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(
        check_y=lambda x: isinstance(x, list), methods_to_check=["fit"],
    )
    cv = KFold(n_splits=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X, y.tolist()).score(X, y)
    assert hasattr(grid_search, "cv_results_")


@ignore_warnings
def test_pandas_input():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        pass

    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)

        def check_df(x):
            return isinstance(x, InputFeatureType)

        def check_series(x):
            return isinstance(x, TargetType)

        clf = CheckingClassifier(check_X=check_df, check_y=check_series)

        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert hasattr(grid_search, "cv_results_")


def test_unsupervised_grid_search():
    # test grid-search with unsupervised estimator
    X, y = make_blobs(n_samples=50, random_state=0)
    km = KMeans(random_state=0, init="random", n_init=1)

    # Multi-metric evaluation unsupervised
    scoring = ['adjusted_rand_score', 'fowlkes_mallows_score']
    for refit in ['adjusted_rand_score', 'fowlkes_mallows_score']:
        grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                                   scoring=scoring, refit=refit)
        grid_search.fit(X, y)
        # Both ARI and FMS can find the right number :)
        assert grid_search.best_params_["n_clusters"] == 3

    # Single metric evaluation unsupervised
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                               scoring='fowlkes_mallows_score')
    grid_search.fit(X, y)
    assert grid_search.best_params_["n_clusters"] == 3

    # Now without a score, and without y
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    grid_search.fit(X)
    assert grid_search.best_params_["n_clusters"] == 4


def test_gridsearch_no_predict():
    # test grid-search with an estimator
    # without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        return 42 if estimator.bandwidth == .1 else 0
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert search.best_params_['bandwidth'] == .1
    assert search.best_score_ == 42


def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert len(samples) == 10
    for sample in samples:
        assert sample["kernel"] in ["rbf", "linear"]
        assert 0 <= sample["C"] <= 1

    # test that repeated calls yield identical parameters
    param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=3, random_state=0)
    assert [x for x in sampler] == [x for x in sampler]

    if sp_version >= (0, 16):
        param_distributions = {"C": uniform(0, 1)}
        sampler = ParameterSampler(param_distributions=param_distributions,
                                   n_iter=10, random_state=0)
        assert [x for x in sampler] == [x for x in sampler]


def check_cv_results_array_types(search, param_keys, score_keys):
    # Check that the search's `cv_results_` arrays are of the correct types
    cv_results = search.cv_results_
    assert all(isinstance(cv_results[param], np.ma.MaskedArray)
               for param in param_keys)
    assert all(cv_results[key].dtype == object for key in param_keys)
    assert not any(isinstance(cv_results[key], np.ma.MaskedArray)
                   for key in score_keys)
    assert all(cv_results[key].dtype == np.float64 for key in score_keys
               if not key.startswith('rank'))

    scorer_keys = search.scorer_.keys() if search.multimetric_ else ['score']

    for key in scorer_keys:
        assert cv_results['rank_test_%s' % key].dtype == np.int32


def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
    # Test that search.cv_results_ contains all the required keys
    assert_array_equal(sorted(cv_results.keys()),
                       sorted(param_keys + score_keys + ('params',)))
    assert all(cv_results[key].shape == (n_cand,)
               for key in param_keys + score_keys)


def test_grid_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_grid_points = 6
    params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
              dict(kernel=['poly', ], degree=[1, 2])]

    param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_candidates = n_grid_points

    search = GridSearchCV(SVC(), cv=n_splits, param_grid=params,
                          return_train_score=True)
    search.fit(X, y)
    cv_results = search.cv_results_
    # Check if score and timing are reasonable. Note that the bounds checks
    # must consume the generator with all(); a bare generator inside assert
    # is always truthy and would silently pass.
    assert all(cv_results['rank_test_score'] >= 1)
    assert all(np.all(cv_results[k] >= 0) for k in score_keys
               if k != 'rank_test_score')
    assert all(np.all(cv_results[k] <= 1) for k in score_keys
               if 'time' not in k and k != 'rank_test_score')
    # Check cv_results structure
    check_cv_results_array_types(search, param_keys, score_keys)
    check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
    # Check masking
    cv_results = search.cv_results_
    n_candidates = len(search.cv_results_['params'])
    assert all((cv_results['param_C'].mask[i] and
                cv_results['param_gamma'].mask[i] and
                not cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'linear')
    assert all((not cv_results['param_C'].mask[i] and
                not cv_results['param_gamma'].mask[i] and
                cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'rbf')


def test_random_search_cv_results():
    X, y = make_classification(n_samples=50, n_features=4, random_state=42)

    n_splits = 3
    n_search_iter = 30

    params = [{'kernel': ['rbf'], 'C': expon(scale=10),
               'gamma': expon(scale=0.1)},
              {'kernel': ['poly'], 'degree': [2, 3]}]
    param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
    score_keys = ('mean_test_score', 'mean_train_score',
                  'rank_test_score',
                  'split0_test_score', 'split1_test_score',
                  'split2_test_score',
                  'split0_train_score', 'split1_train_score',
                  'split2_train_score',
                  'std_test_score', 'std_train_score',
                  'mean_fit_time', 'std_fit_time',
                  'mean_score_time', 'std_score_time')
    n_cand = n_search_iter

    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_splits,
                                param_distributions=params,
                                return_train_score=True)
    search.fit(X, y)
    cv_results = search.cv_results_
    # Check results structure
    check_cv_results_array_types(search, param_keys, score_keys)
    check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
    n_candidates = len(search.cv_results_['params'])
    assert all((cv_results['param_C'].mask[i] and
                cv_results['param_gamma'].mask[i] and
                not cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'linear')
    assert all((not cv_results['param_C'].mask[i] and
                not cv_results['param_gamma'].mask[i] and
                cv_results['param_degree'].mask[i])
               for i in range(n_candidates)
               if cv_results['param_kernel'][i] == 'rbf')


@pytest.mark.parametrize(
    "SearchCV, specialized_params",
    [(GridSearchCV, {'param_grid': {'C': [1, 10]}}),
     (RandomizedSearchCV,
      {'param_distributions': {'C': [1, 10]}, 'n_iter': 2})]
)
def test_search_default_iid(SearchCV, specialized_params):
    # Check that CV scores are aggregated with an unweighted mean/std over
    # folds (the i.i.d. assumption), irrespective of fold sizes
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]],
                      random_state=0, cluster_std=0.1, shuffle=False,
                      n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    mask = np.ones(X.shape[0], dtype=bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]

    common_params = {'estimator': SVC(), 'cv': cv,
                     'return_train_score': True}
    search = SearchCV(**common_params, **specialized_params)
    search.fit(X, y)

    test_cv_scores = np.array(
        [search.cv_results_['split%d_test_score' % s][0]
         for s in range(search.n_splits_)]
    )
    test_mean = search.cv_results_['mean_test_score'][0]
    test_std = search.cv_results_['std_test_score'][0]

    train_cv_scores = np.array(
        [search.cv_results_['split%d_train_score' % s][0]
         for s in range(search.n_splits_)]
    )
    train_mean = search.cv_results_['mean_train_score'][0]
    train_std = search.cv_results_['std_train_score'][0]

    assert search.cv_results_['param_C'][0] == 1
    # scores are the same as above
    assert_allclose(test_cv_scores, [1, 1.
/ 3.]) assert_allclose(train_cv_scores, [1, 1]) # Unweighted mean/std is used assert test_mean == pytest.approx(np.mean(test_cv_scores)) assert test_std == pytest.approx(np.std(test_cv_scores)) # For the train scores, we do not take a weighted mean irrespective of # i.i.d. or not assert train_mean == pytest.approx(1) assert train_std == pytest.approx(0) def test_grid_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]), dict(kernel=['poly', ], degree=[1, 2])] grid_searches = [] for scoring in ({'accuracy': make_scorer(accuracy_score), 'recall': make_scorer(recall_score)}, 'accuracy', 'recall'): grid_search = GridSearchCV(SVC(), cv=n_splits, param_grid=params, scoring=scoring, refit=False) grid_search.fit(X, y) grid_searches.append(grid_search) compare_cv_results_multimetric_with_single(*grid_searches) def test_random_search_cv_results_multimetric(): X, y = make_classification(n_samples=50, n_features=4, random_state=42) n_splits = 3 n_search_iter = 30 # Scipy 0.12's stats dists do not accept seed, hence we use param grid params = dict(C=np.logspace(-4, 1, 3), gamma=np.logspace(-5, 0, 3, base=0.1)) for refit in (True, False): random_searches = [] for scoring in (('accuracy', 'recall'), 'accuracy', 'recall'): # If True, for multi-metric pass refit='accuracy' if refit: probability = True refit = 'accuracy' if isinstance(scoring, tuple) else refit else: probability = False clf = SVC(probability=probability, random_state=42) random_search = RandomizedSearchCV(clf, n_iter=n_search_iter, cv=n_splits, param_distributions=params, scoring=scoring, refit=refit, random_state=0) random_search.fit(X, y) random_searches.append(random_search) compare_cv_results_multimetric_with_single(*random_searches) compare_refit_methods_when_refit_with_acc( random_searches[0], random_searches[1], refit) def compare_cv_results_multimetric_with_single( search_multi, search_acc, search_rec): """Compare multi-metric cv_results with the ensemble of multiple single metric cv_results from single metric grid/random search""" assert search_multi.multimetric_ assert_array_equal(sorted(search_multi.scorer_), ('accuracy', 'recall')) cv_results_multi = search_multi.cv_results_ cv_results_acc_rec = {re.sub('_score$', '_accuracy', k): v for k, v in search_acc.cv_results_.items()} cv_results_acc_rec.update({re.sub('_score$', '_recall', k): v for k, v in search_rec.cv_results_.items()}) # Check if score and timing are reasonable, also checks if the keys # are present assert all((np.all(cv_results_multi[k] <= 1) for k in ( 'mean_score_time', 'std_score_time', 'mean_fit_time', 'std_fit_time'))) # Compare the keys, other than time keys, among multi-metric and # single metric grid search results. 
np.testing.assert_equal performs a # deep nested comparison of the two cv_results dicts np.testing.assert_equal({k: v for k, v in cv_results_multi.items() if not k.endswith('_time')}, {k: v for k, v in cv_results_acc_rec.items() if not k.endswith('_time')}) def compare_refit_methods_when_refit_with_acc(search_multi, search_acc, refit): """Compare refit multi-metric search methods with single metric methods""" assert search_acc.refit == refit if refit: assert search_multi.refit == 'accuracy' else: assert not search_multi.refit return # search cannot predict/score without refit X, y = make_blobs(n_samples=100, n_features=4, random_state=42) for method in ('predict', 'predict_proba', 'predict_log_proba'): assert_almost_equal(getattr(search_multi, method)(X), getattr(search_acc, method)(X)) assert_almost_equal(search_multi.score(X, y), search_acc.score(X, y)) for key in ('best_index_', 'best_score_', 'best_params_'): assert getattr(search_multi, key) == getattr(search_acc, key) def test_search_cv_results_rank_tie_breaking(): X, y = make_blobs(n_samples=50, random_state=42) # The two C values are close enough to give similar models # which would result in a tie of their mean cv-scores param_grid = {'C': [1, 1.001, 0.001]} grid_search = GridSearchCV(SVC(), param_grid=param_grid, return_train_score=True) random_search = RandomizedSearchCV(SVC(), n_iter=3, param_distributions=param_grid, return_train_score=True) for search in (grid_search, random_search): search.fit(X, y) cv_results = search.cv_results_ # Check tie breaking strategy - # Check that there is a tie in the mean scores between # candidates 1 and 2 alone assert_almost_equal(cv_results['mean_test_score'][0], cv_results['mean_test_score'][1]) assert_almost_equal(cv_results['mean_train_score'][0], cv_results['mean_train_score'][1]) assert not np.allclose(cv_results['mean_test_score'][1], cv_results['mean_test_score'][2]) assert not np.allclose(cv_results['mean_train_score'][1], cv_results['mean_train_score'][2]) # 'min' rank should be assigned to the tied candidates assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3]) def test_search_cv_results_none_param(): X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1] estimators = (DecisionTreeRegressor(), DecisionTreeClassifier()) est_parameters = {"random_state": [0, None]} cv = KFold() for est in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv, ).fit(X, y) assert_array_equal(grid_search.cv_results_['param_random_state'], [0, None]) @ignore_warnings() def test_search_cv_timing(): svc = LinearSVC(random_state=0) X = [[1, ], [2, ], [3, ], [4, ]] y = [0, 1, 1, 0] gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0) rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2) for search in (gs, rs): search.fit(X, y) for key in ['mean_fit_time', 'std_fit_time']: # NOTE The precision of time.time in windows is not high # enough for the fit/score times to be non-zero for trivial X and y assert
np.all(search.cv_results_[key] >= 0)
numpy.all
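# The tests above pin down the multi-metric refit contract: with a dict of
# scorers, `refit` must name one of the scorer keys (or be a callable).
# A minimal sketch of that pattern, using only the documented GridSearchCV
# API exercised by these tests (variable names are illustrative):
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC

X_demo, y_demo = make_classification(n_samples=100, n_features=4,
                                     random_state=42)
multi_metric_search = GridSearchCV(
    LinearSVC(random_state=42),
    param_grid={'C': [0.01, 0.1, 1]},
    scoring={'acc': 'accuracy', 'prec': 'precision'},
    refit='acc')  # refit must be a scorer key when scoring is multi-metric
multi_metric_search.fit(X_demo, y_demo)
# best_index_/best_params_ now follow 'acc'; other metrics stay available:
print(multi_metric_search.best_params_,
      multi_metric_search.cv_results_['mean_test_prec'][
          multi_metric_search.best_index_])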
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np

#from ... import config
#if not config.is_building_docs():
#    from qiskit_metal import is_true


class TransmonInterdigitated(QComponent):
    """
    The base "TransmonInterdigitated" class inherits the "QComponent" class.

    This creates a transmon pocket with two large pads connected by a
    Josephson junction. Both pads have four interdigitated "fingers" which
    increase the capacitance of the structure. There are three coupling
    capacitor pads with qpins defined; these can be connected to other
    structures in a design using CPWs.

    Default Options:
        * pad_width: '1000um' -- width of the large rectangular pads on
          either side of the junction
        * pad_height: '300um' -- height of the large rectangular pads on
          either side of the junction
        * finger_width: '50um' -- width of the "finger" on either side of
          the junction
        * finger_height: '100um' -- height of the "finger" on the side of
          the junction
        * finger_space: '50um' -- height of the Josephson Junction
          (equivalently, the space between the two fingers)
        * pad_pos_x: '0um' -- the internal coordinate defining the center of
          the bottom rectangular pad
        * pad_pos_y: '0um' -- the internal coordinate defining the center of
          the bottom rectangular pad
        * comb_width: '50um' -- the width of the four interdigitated combs
          connected to either pad
        * comb_space_vert: '50um' -- the space between the edge of a comb
          and the edge of the opposite rectangular pad
        * comb_space_hor: '50um' -- the space between adjacent
          interdigitated comb structures
        * jj_width: '20um' -- the width of the Josephson Junction located
          between the two fingers of the device
        * cc_space: '50um' -- the space between the lower rectangular pad
          and the coupling capacitor below it
        * cc_width: '100um' -- the width of the coupling capacitor located
          below the bottom rectangular pad
        * cc_height: '100um' -- the height of the coupling capacitor located
          below the bottom rectangular pad
        * cc_topleft_space: '50um' -- the space between the upper
          rectangular pad and the top left coupling capacitor
        * cc_topleft_width: '100um' -- the width of the top left coupling
          capacitor pad
        * cc_topleft_height: '100um' -- the height of the top left coupling
          capacitor pad
        * cc_topright_space: '50um' -- the space between the upper
          rectangular pad and the top right coupling capacitor
        * cc_topright_width: '100um' -- the width of the top right coupling
          capacitor pad
        * cc_topright_height: '100um' -- the height of the top right
          coupling capacitor pad
        * position_x: '0um' -- the x-coordinate defining the center of the
          transmon pocket on the chip
        * position_y: '0um' -- the y-coordinate defining the center of the
          transmon pocket on the chip
        * rotation: '0.0' -- the angle at which the entire structure is
          rotated
        * rotation_top_pad: '180' -- internal coordinate defining the angle
          of rotation between top and bottom pads
        * layer: '1' -- all objects are drawn assuming they are part of the
          same layer on the chip
    """

    # Default
drawing options default_options = Dict(pad_width='1000um', pad_height='300um', finger_width='50um', finger_height='100um', finger_space='50um', pad_pos_x='0um', pad_pos_y='0um', comb_width='50um', comb_space_vert='50um', comb_space_hor='50um', jj_width='20um', cc_space='50um', cc_width='100um', cc_height='100um', cc_topleft_space='50um', cc_topleft_width='100um', cc_topleft_height='100um', cc_topright_space='50um', cc_topright_width='100um', cc_topright_height='100um', position_x='0um', position_y='0um', rotation='0.0', rotation_top_pad='180', layer='1') """Default drawing options""" # Name prefix of component, if user doesn't provide name component_metadata = Dict(short_name='component') """Component metadata""" def make(self): """Convert self.options into QGeometry.""" p = self.parse_options() # Parse the string options into numbers # draw the lower pad as a rectangle pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x, p.pad_pos_y) # draw the lower finger as a rectangle finger_lower = draw.rectangle( p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y + 0.49999 * (p.pad_height) + 0.49999 * (p.finger_height)) # draw the Josephson Junction rect_jj = draw.rectangle( p.jj_width, p.finger_space, p.pad_pos_x, 0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space)) # draw the first comb to the right of the lower finger as a rectangle comb1_lower = draw.rectangle( p.comb_width, (2 * p.finger_height + p.finger_space - p.comb_space_vert), (0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width), (0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height)))) # draw the second comb to the right of the lower finger by translating the first comb comb2_lower = draw.translate(comb1_lower, 2.0 * (p.comb_space_hor + p.comb_width), 0.0) # draw the first comb to the left of the lower finger comb3_lower = draw.rectangle( p.comb_width, (2 * p.finger_height + p.finger_space - p.comb_space_vert), (-0.5 * p.finger_width - 2.0 * p.comb_space_hor - 1.5 * p.comb_width), (0.5 * p.pad_height + 0.5 * (p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height)))) # draw the second comb to the left of the lower finger comb4_lower = draw.translate(comb3_lower, -2.0 * (p.comb_space_hor + p.comb_width), 0.0) coupling_capacitor = draw.rectangle( p.cc_width, p.cc_height, p.pad_pos_x, p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height) cc_topleft = draw.rectangle( p.cc_topleft_width, p.cc_topleft_height, p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width, p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height) cc_topright = draw.translate( cc_topleft, p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width, 0.0) # merge the bottom elements bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower, comb3_lower, comb4_lower) # create the top portion of the comb by translating and rotating # the bottom portion of the comb top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space) top = draw.rotate(top, p.rotation_top_pad) # merge everything into a single design design = draw.union(bottom, top, rect_jj, coupling_capacitor, cc_topleft, cc_topright) # draw the transmon pocket bounding box pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height) # the origin is originally set to the middle of the lower pad. # Let's move it to the center of the JJ. 
design = draw.translate(
            design, 0.0,
            -0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)

        # now translate the final structure according to the user input
        design = draw.rotate(design, p.rotation, origin=(0, 0))
        design = draw.translate(design, p.position_x, p.position_y)

        pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
        pocket = draw.translate(pocket, p.position_x, p.position_y)

        geom = {'design': design}
        geom_pocket = {'pocket': pocket}
        self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
        self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)

        ###################################################################

        # Add Qpin connections for coupling capacitors

        # define a function that both rotates and translates the
        # qpin coordinates
        def qpin_rotate_translate(x):
            """
            This function rotates the coordinates of the three qpins
            according to the user inputs for "position_x", "position_y"
            and "rotation".
            """
            y = list(x)
            z = [0.0, 0.0]
            # use np.pi rather than a truncated literal for the
            # degree-to-radian conversion
            z[0] = y[0] * cos(p.rotation * np.pi / 180) - y[1] * sin(
                p.rotation * np.pi / 180)
            z[1] = y[0] * sin(p.rotation * np.pi / 180) + y[1] * cos(
                p.rotation * np.pi / 180)
            z[0] = z[0] + p.position_x
            z[1] = z[1] + p.position_y
            x = (z[0], z[1])
            return x

        # Add Qpin connections for the bottom coupling capacitor
        qp1a = (0.0,
                -0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
        qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
                0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)

        # rotate and translate the qpin coordinates
        qp1a = qpin_rotate_translate(qp1a)
        qp1b = qpin_rotate_translate(qp1b)

        self.add_pin('pin1',
                     points=np.array([qp1a, qp1b]),
                     width=0.01,
                     input_as_norm=True)

        # Add Qpin connections for top left coupling capacitor
        qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)
        qp2b = (p.pad_pos_x - 0.5 * p.pad_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)

        qp2a = qpin_rotate_translate(qp2a)
        qp2b = qpin_rotate_translate(qp2b)

        self.add_pin('pin2',
                     points=np.array([qp2a, qp2b]),
                     width=0.01,
                     input_as_norm=True)

        # Add Qpin connections for top right coupling capacitor
        qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)
        qp3b = (p.pad_pos_x + 0.5 * p.pad_width,
                p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
                p.finger_space + p.cc_topleft_space +
                0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
                p.finger_height - 0.5 * p.finger_space)

        qp3a = qpin_rotate_translate(qp3a)
        qp3b = qpin_rotate_translate(qp3b)

        self.add_pin('pin3',
                     points=
np.array([qp3a, qp3b])
numpy.array
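# Hypothetical usage sketch (not part of the original component file): how
# the component above would typically be instantiated in a qiskit-metal
# design. DesignPlanar and MetalGUI are qiskit-metal's documented entry
# points; the component name 'Q1' and the option overrides are illustrative.
from qiskit_metal import designs, MetalGUI

design = designs.DesignPlanar()
gui = MetalGUI(design)
q1 = TransmonInterdigitated(design, 'Q1',
                            options=dict(position_x='-2.0mm',
                                         position_y='0.0mm',
                                         rotation='90'))
gui.rebuild()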
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots_uniform)):
        axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()

# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots)):
        axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()

# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
                                                              optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
    for j in range(1, len(knots[i])):
        axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()

# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
    preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1],
            label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1],
            label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
            label=textwrap.fill('Winsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
            label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()

# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
            label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
            label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
            label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
            label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
            label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 13)) axs[1].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_smooth.png') plt.show() # plot 2 fig, axs = plt.subplots(1, 2, sharey=True) axs[0].set_title('Cubic B-Spline Bases') axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1') axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2') axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3') axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4') axs[0].legend(loc='upper left') axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-') axs[0].set_xticks([5, 6]) axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[0].set_xlim(4.5, 6.5) axs[1].set_title('Cubic Hermite Spline Bases') axs[1].plot(time, chsi_basis[10, :].T, '--') axs[1].plot(time, chsi_basis[11, :].T, '--') axs[1].plot(time, chsi_basis[12, :].T, '--') axs[1].plot(time, chsi_basis[13, :].T, '--') axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') axs[1].set_xticks([5, 6]) axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $']) axs[1].set_xlim(4.5, 6.5) plt.savefig('jss_figures/comparing_bases.png') plt.show() # plot 3 a = 0.25 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_dash = maxima_y[-1] * np.ones_like(max_dash_time) min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_dash = minima_y[-1] * np.ones_like(min_dash_time) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) max_discard = maxima_y[-1] max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1] max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101) max_discard_dash = max_discard * np.ones_like(max_discard_dash_time) dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101) dash_2 = np.linspace(minima_y[-1], max_discard, 101) end_point_time = time[-1] end_point = time_series[-1] time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101) time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi, (5 - a) * np.pi, 101))) time_series_anti_reflect = time_series_reflect[0] - time_series_reflect utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect) anti_max_bool = utils.max_bool_func_1st_order_fd() anti_max_point_time = time_reflect[anti_max_bool] 
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]

point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)

point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)

symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)

end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)

anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)

ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', linewidth=2,
         label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', linewidth=2,
         label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--',
         label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4,
            label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4,
            label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4,
            label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4,
            label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
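# Brief sketch connecting the figure above to the package interface: the
# symmetric / anti-symmetric extensions illustrated here are selected via the
# `edge_effect` keyword of empirical_mode_decomposition(), as used elsewhere
# in this script ('symmetric' is assumed here to be an accepted value
# alongside the 'anti-symmetric' option demonstrated earlier).
emd_edge_demo = EMD(time=time, time_series=time_series)
imfs_symmetric = emd_edge_demo.empirical_mode_decomposition(
    edge_effect='symmetric', verbose=False)[0]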
# plot 4 a = 0.21 width = 0.2 time = np.linspace(0, (5 - a) * np.pi, 1001) time_series = np.cos(time) + np.cos(5 * time) utils = emd_utils.Utility(time=time, time_series=time_series) max_bool = utils.max_bool_func_1st_order_fd() maxima_x = time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1) max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1) min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1) min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1) dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101) dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101) dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101) dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101) s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1]) slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1 max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1) max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101) dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101) dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101) s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1]) slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2 min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1) min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101) dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time) dash_4 = np.linspace(slope_based_maximum, slope_based_minimum) maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101) maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash) maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash) maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash) maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101) maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time) minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101) minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash) minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash) minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash) minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101) minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time) # slightly edit signal to make difference between slope-based method and improved slope-based method more clear time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \ time_series[time == minima_x[-1]] improved_slope_based_maximum_time = time[-1] improved_slope_based_maximum = time_series[-1] improved_slope_based_minimum_time = slope_based_minimum_time improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time - improved_slope_based_maximum_time) 
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)

ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, linewidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         -0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
         1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
            label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
            label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
            label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
            label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()

# plot 5
a = 0.25
width = 0.2

time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x =
time[max_bool] maxima_y = time_series[max_bool] min_bool = utils.min_bool_func_1st_order_fd() minima_x = time[min_bool] minima_y = time_series[min_bool] A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2 A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2 P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2]) P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1]) Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1] Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1] Coughlin_time = Huang_time Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0])) Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2]) Average_max = (maxima_y[-2] + maxima_y[-1]) / 2 Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2]) Average_min = (minima_y[-2] + minima_y[-1]) / 2 utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave) Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd() Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd() utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave) Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd() Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd() Huang_max_time = Huang_time[Huang_max_bool] Huang_max = Huang_wave[Huang_max_bool] Huang_min_time = Huang_time[Huang_min_bool] Huang_min = Huang_wave[Huang_min_bool] Coughlin_max_time = Coughlin_time[Coughlin_max_bool] Coughlin_max = Coughlin_wave[Coughlin_max_bool] Coughlin_min_time = Coughlin_time[Coughlin_min_bool] Coughlin_min = Coughlin_wave[Coughlin_min_bool] max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101) max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time) min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101) min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101) min_2_x = minima_y[-2] * np.ones_like(min_2_x_time) dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101) dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x) max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101) max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y) min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101) min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101) min_2_y_time = minima_x[-2] * np.ones_like(min_2_y) dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101) dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time) max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101) max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time) min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101) min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101) min_1_x = minima_y[-1] * np.ones_like(min_1_x_time) dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101) dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x) max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101) max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y) min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101) min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101) min_1_y_time = 
minima_x[-1] * np.ones_like(min_1_y) dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101) dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Characteristic Wave Effects Example') plt.plot(time, time_series, LineWidth=2, label='Signal') plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10)) plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10)) plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4, label=textwrap.fill('Coughlin maximum', 14)) plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4, label=textwrap.fill('Coughlin minimum', 14)) plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4, label=textwrap.fill('Average maximum', 14)) plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4, label=textwrap.fill('Average minimum', 14)) plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima') plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima') plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14)) plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14)) plt.plot(max_2_x_time, max_2_x, 'k-') plt.plot(max_2_x_time_side, max_2_x, 'k-') plt.plot(min_2_x_time, min_2_x, 'k-') plt.plot(min_2_x_time_side, min_2_x, 'k-') plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--') plt.text(5.16 * np.pi, 0.85, r'$2a_2$') plt.plot(max_2_y_time, max_2_y, 'k-') plt.plot(max_2_y_time, max_2_y_side, 'k-') plt.plot(min_2_y_time, min_2_y, 'k-') plt.plot(min_2_y_time, min_2_y_side, 'k-') plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--') plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$') plt.plot(max_1_x_time, max_1_x, 'k-') plt.plot(max_1_x_time_side, max_1_x, 'k-') plt.plot(min_1_x_time, min_1_x, 'k-') plt.plot(min_1_x_time_side, min_1_x, 'k-') plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--') plt.text(5.42 * np.pi, -0.1, r'$2a_1$') plt.plot(max_1_y_time, max_1_y, 'k-') plt.plot(max_1_y_time, max_1_y_side, 'k-') plt.plot(min_1_y_time, min_1_y, 'k-') plt.plot(min_1_y_time, min_1_y_side, 'k-') plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--') plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$') plt.xlim(3.9 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/edge_effects_characteristic_wave.png') plt.show() # plot 6 t = np.linspace(5, 95, 100) signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200) util_nn = emd_utils.Utility(time=t, time_series=signal_orig) maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()] minima = signal_orig[util_nn.min_bool_func_1st_order_fd()] cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima) cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima) time = np.linspace(0, 5 * np.pi, 1001) lsq_signal = np.cos(time) + np.cos(5 * time) knots = np.linspace(0, 5 * np.pi, 101) time_extended = time_extension(time) time_series_extended = np.zeros_like(time_extended) / 0 time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] 
= lsq_signal neural_network_m = 200 neural_network_k = 100 # forward -> P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))] P[-1, col] = 1 # for additive constant t = lsq_signal[-neural_network_m:] # test - top seed_weights = np.ones(neural_network_k) / neural_network_k weights = 0 * seed_weights.copy() train_input = P[:-1, :] lr = 0.01 for iterations in range(1000): output = np.matmul(weights, train_input) error = (t - output) gradients = error * (- train_input) # guess average gradients average_gradients = np.mean(gradients, axis=1) # steepest descent max_gradient_vector = average_gradients * (np.abs(average_gradients) == max(np.abs(average_gradients))) adjustment = - lr * average_gradients # adjustment = - lr * max_gradient_vector weights += adjustment # test - bottom weights_right = np.hstack((weights, 0)) max_count_right = 0 min_count_right = 0 i_right = 0 while ((max_count_right < 1) or (min_count_right < 1)) and (i_right < len(lsq_signal) - 1): time_series_extended[int(2 * (len(lsq_signal) - 1) + 1 + i_right)] = \ sum(weights_right * np.hstack((time_series_extended[ int(2 * (len(lsq_signal) - 1) + 1 - neural_network_k + i_right): int(2 * (len(lsq_signal) - 1) + 1 + i_right)], 1))) i_right += 1 if i_right > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_right += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)], time_series=time_series_extended[int(2 * (len(lsq_signal) - 1) + 1): int(2 * (len(lsq_signal) - 1) + 1 + i_right + 1)]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_right += 1 # backward <- P = np.zeros((int(neural_network_k + 1), neural_network_m)) for col in range(neural_network_m): P[:-1, col] = lsq_signal[int(col + 1):int(col + neural_network_k + 1)] P[-1, col] = 1 # for additive constant t = lsq_signal[:neural_network_m] vx = cvx.Variable(int(neural_network_k + 1)) objective = cvx.Minimize(cvx.norm((2 * (vx * P) + 1 - t), 2)) # linear activation function is arbitrary prob = cvx.Problem(objective) result = prob.solve(verbose=True, solver=cvx.ECOS) weights_left = np.array(vx.value) max_count_left = 0 min_count_left = 0 i_left = 0 while ((max_count_left < 1) or (min_count_left < 1)) and (i_left < len(lsq_signal) - 1): time_series_extended[int(len(lsq_signal) - 2 - i_left)] = \ 2 * sum(weights_left * np.hstack((time_series_extended[int(len(lsq_signal) - 1 - i_left): int(len(lsq_signal) - 1 - i_left + neural_network_k)], 1))) + 1 i_left += 1 if i_left > 1: emd_utils_max = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_max.max_bool_func_1st_order_fd()) > 0: max_count_left += 1 emd_utils_min = \ emd_utils.Utility(time=time_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))], time_series=time_series_extended[int(len(lsq_signal) - 1 - i_left):int(len(lsq_signal))]) if sum(emd_utils_min.min_bool_func_1st_order_fd()) > 0: min_count_left += 1 lsq_utils = emd_utils.Utility(time=time, 
time_series=lsq_signal) utils_extended = emd_utils.Utility(time=time_extended, time_series=time_series_extended) maxima = lsq_signal[lsq_utils.max_bool_func_1st_order_fd()] maxima_time = time[lsq_utils.max_bool_func_1st_order_fd()] maxima_extrapolate = time_series_extended[utils_extended.max_bool_func_1st_order_fd()][-1] maxima_extrapolate_time = time_extended[utils_extended.max_bool_func_1st_order_fd()][-1] minima = lsq_signal[lsq_utils.min_bool_func_1st_order_fd()] minima_time = time[lsq_utils.min_bool_func_1st_order_fd()] minima_extrapolate = time_series_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] minima_extrapolate_time = time_extended[utils_extended.min_bool_func_1st_order_fd()][-2:] ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('Single Neuron Neural Network Example') plt.plot(time, lsq_signal, zorder=2, label='Signal') plt.plot(time_extended, time_series_extended, c='g', zorder=1, label=textwrap.fill('Extrapolated signal', 12)) plt.scatter(maxima_time, maxima, c='r', zorder=3, label='Maxima') plt.scatter(minima_time, minima, c='b', zorder=3, label='Minima') plt.scatter(maxima_extrapolate_time, maxima_extrapolate, c='magenta', zorder=3, label=textwrap.fill('Extrapolated maxima', 12)) plt.scatter(minima_extrapolate_time, minima_extrapolate, c='cyan', zorder=4, label=textwrap.fill('Extrapolated minima', 12)) plt.plot(((time[-302] + time[-301]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k', label=textwrap.fill('Neural network inputs', 13)) plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time[-302] + time[-301]) / 2), ((time[-302] + time[-301]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='k') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1002]) / 2), ((time_extended[-1001] + time_extended[-1002]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='k') plt.plot(((time_extended[-1001] + time_extended[-1002]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='k') plt.plot(((time[-202] + time[-201]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed', label=textwrap.fill('Neural network targets', 13)) plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time[-202] + time[-201]) / 2), ((time[-202] + time[-201]) / 2) + 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), -2.75 * np.ones(100), c='gray') plt.plot(np.linspace(((time_extended[-1001] + time_extended[-1000]) / 2), ((time_extended[-1001] + time_extended[-1000]) / 2) - 0.1, 100), 2.75 * np.ones(100), c='gray') plt.plot(((time_extended[-1001] + time_extended[-1000]) / 2) * np.ones(100), np.linspace(-2.75, 2.75, 100), c='gray', linestyle='dashed') plt.xlim(3.4 * np.pi, 5.6 * np.pi) plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$')) plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2')) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/neural_network.png') plt.show() # plot 6a np.random.seed(0) time = np.linspace(0, 5 * 
np.pi, 1001) knots_51 = np.linspace(0, 5 * np.pi, 51) time_series = np.cos(2 * time) +
np.cos(4 * time)
numpy.cos
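The edge-effect scripts above repeatedly call max_bool_func_1st_order_fd() and min_bool_func_1st_order_fd() on emd_utils.Utility to locate extrema. Below is a minimal sketch of how such a first-order finite-difference detector can be written; the helper name and the boundary handling are assumptions, and the real emd_utils implementation may differ.

import numpy as np

def max_bool_1st_order_fd(ts):
    """Boolean mask, True at interior local maxima of ts (hypothetical helper)."""
    d = np.diff(ts)                                   # first-order finite difference
    rising_then_falling = (d[:-1] > 0) & (d[1:] < 0)  # slope changes sign + to -
    mask = np.zeros(len(ts), dtype=bool)
    mask[1:-1] = rising_then_falling                  # endpoints are never flagged
    return mask

time = np.linspace(0, 5 * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
maxima_x = time[max_bool_1st_order_fd(time_series)]   # abscissae of the red markers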
import os from PIL import Image import cv2 from os import listdir from os.path import join import matplotlib.pyplot as plt import matplotlib from matplotlib.colors import LogNorm from io_utils.io_common import create_folder from viz_utils.constants import PlotMode, BackgroundType import pylab import numpy as np import cmocean import shapely import cartopy.crs as ccrs import cartopy.feature as cfeature import cartopy def select_colormap(field_name): ''' Based on the name if the field it chooses a colormap from cmocean Args: field_name: Returns: ''' if np.any([field_name.find(x) != -1 for x in ('ssh', 'srfhgt', 'adt','surf_el')]): # cmaps_fields.append(cmocean.cm.deep_r) return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('temp', 'sst', 'temperature')]): return cmocean.cm.thermal elif np.any([field_name.find(x) != -1 for x in ('vorticity', 'vort')]): return cmocean.cm.curl elif np.any([field_name.find(x) != -1 for x in ('salin', 'sss', 'sal')]): return cmocean.cm.haline elif field_name.find('error') != -1: return cmocean.cm.diff elif field_name.find('binary') != -1: return cmocean.cm.oxy elif np.any([field_name.find(x) != -1 for x in ('u_', 'v_', 'u-vel.', 'v-vel.','velocity')]): return cmocean.cm.speed class EOAImageVisualizer: """This class makes plenty of plots assuming we are plotting Geospatial data (maps). It is made to read xarrays, numpy arrays, and numpy arrays in dictionaries vizobj = new EOAImageVisualizer(disp_images=True, output_folder='output', lats=[lats],lons=[lons]) """ _COLORS = ['y', 'r', 'c', 'b', 'g', 'w', 'k', 'y', 'r', 'c', 'b', 'g', 'w', 'k'] _figsize = 8 _font_size = 30 _units = '' _max_imgs_per_row = 4 _mincbar = np.nan # User can set a min and max colorbar values to 'force' same color bar to all plots _maxcbar = np.nan _flip_data = True _eoas_pyutils_path = './eoas_pyutils'# This is the path where the eoas_utils folder is stored with respect to the main project _contourf = False # When plotting non-regular grids and need precision _background = BackgroundType.BLUE_MARBLE_LR # Select the background to use _auto_colormap = True # Selects the colormap based on the name of the field _show_var_names = False # Includes the name of the field name in the titles _additional_polygons = [] # MUST BE SHAPELY GEOMETRIES In case we want to include additional polygons in the plots (all of them) # If you want to add a streamplot of a vector field. It must be a dictionary with keys x,y,u,v # and optional density, color, cmap, arrowsize, arrowstyle, minlength _vector_field = None _norm = None # Use to normalize the colormap. For example with LogNorm # vizobj = EOAImageVisualizer(disp_images=True, output_folder='output', # lats=[lats],lons=[lons]) def __init__(self, disp_images=True, output_folder='output', lats=[-90,90], lons =[-180,180], projection=ccrs.PlateCarree(), **kwargs): # All the arguments that are passed to the constructor of the class MUST have its name on it. 
self._disp_images = disp_images self._output_folder = output_folder self._projection = projection bbox = self.getExtent(lats, lons) self._extent = bbox self._lats = lats self._lons = lons self._fig_prop = (bbox[1]-bbox[0])/(bbox[3]-bbox[2]) self._contour_labels = False for arg_name, arg_value in kwargs.items(): self.__dict__["_" + arg_name] = arg_value print(self.__dict__["_" + arg_name]) def __getattr__(self, attr): '''Generic getter for all the properties of the class''' return self.__dict__["_" + attr] def __setattr__(self, attr, value): '''Generic setter for all the properties of the class''' self.__dict__["_" + attr] = value def add_colorbar(self, fig, im, ax, show_color_bar, label=""): # https://matplotlib.org/api/_as_gen/matplotlib.pyplot.colorbar.html if show_color_bar: font_size_cbar = self._font_size * .5 # TODO how to make this automatic and works always cbar = fig.colorbar(im, ax=ax, shrink=.7) cbar.ax.tick_params(labelsize=font_size_cbar) if label != "": cbar.set_label(label, fontsize=font_size_cbar*1.2) else: cbar.set_label(self._units, fontsize=font_size_cbar*1.2) def plot_slice_eoa(self, c_img, ax, cmap='gray', mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan) -> None: """ Plots a 2D img for EOA data. :param c_img: 2D array :param ax: geoaxes :return: """ c_ax = ax if self._flip_data: origin = 'lower' else: origin = 'upper' if self._background == BackgroundType.CARTO_DEF: c_ax.stock_img() else: if self._background == BackgroundType.BLUE_MARBLE_LR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble.png')) if self._background == BackgroundType.BLUE_MARBLE_HR: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bluemarble_5400x2700.jpg')) if self._background == BackgroundType.TOPO: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/etopo.png')) if self._background == BackgroundType.BATHYMETRY: img = plt.imread(join(self._eoas_pyutils_path,'viz_utils/imgs/bathymetry_3600x1800.jpg')) c_ax.imshow(img, origin='upper', extent=(-180,180,-90,90), transform=ccrs.PlateCarree()) if mode == PlotMode.RASTER or mode == PlotMode.MERGED: if self._contourf: im = c_ax.contourf(self._lons, self._lats, c_img, num_colors=255, cmap='inferno', extent=self._extent) else: if np.isnan(mincbar): im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, transform=self._projection, norm=self._norm) else: im = c_ax.imshow(c_img, extent=self._extent, origin=origin, cmap=cmap, vmin=mincbar, vmax=maxcbar, transform=self._projection, norm=self._norm) if mode == PlotMode.CONTOUR or mode == PlotMode.MERGED: c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) if mode == PlotMode.CONTOUR: im = c_ax.contour(c_img, extent=self._extent, transform=self._projection) if mode == PlotMode.MERGED: if self._contour_labels: c_ax.contour(c_img, self._contour_labels, colors='r', extent=self._extent, transform=self._projection) else: c_ax.contour(c_img, extent=self._extent, transform=self._projection) if len(self._additional_polygons) > 0: pol_lats = [] pol_lons = [] for c_polygon in self._additional_polygons: if isinstance(c_polygon, shapely.geometry.linestring.LineString): x,y = c_polygon.xy elif isinstance(c_polygon, shapely.geometry.polygon.Polygon): x, y = c_polygon.exterior.xy pol_lats += y pol_lons += x c_ax.plot(x,y, transform=self._projection, c='r') # Adds a threshold to the plot to see the polygons c_ax.set_extent(self.getExtent(list(self._lats) + pol_lats, list(self._lons) + pol_lons, 0.5)) if self._vector_field != None: try: u = 
self._vector_field['u'] v = self._vector_field['v'] x = self._vector_field['x'] y = self._vector_field['y'] vec_keys = self._vector_field.keys() c = 'r' density = 1 linewidth = 3 vec_cmap = cmocean.cm.solar if 'color' in vec_keys: c = self._vector_field['color'] if 'density' in vec_keys: density = self._vector_field['density'] if 'linewidth' in vec_keys: linewidth = self._vector_field['linewidth'] if 'cmap' in vec_keys: vec_cmap = self._vector_field['cmap'] c_ax.set_extent(self.getExtent(list(self._lats), list(self._lons))) c_ax.streamplot(x, y, u, v, transform=self._projection, density=density, color=c, cmap=vec_cmap, linewidth=linewidth) except Exception as e: print(F"Couldn't add vector field e:{e}") gl = c_ax.gridlines(draw_labels=True, color='grey', alpha=0.5, linestyle='--') # gl.xlabel_style = {'size': self._font_size/2, 'color': '#aaaaaa', 'weight':'bold'} font_coords = {'size': self._font_size*.6} gl.xlabel_style = font_coords gl.ylabel_style = font_coords gl.top_labels = False gl.right_labels = False return im def get_proper_size(self, rows, cols): """ Obtains the proper size for a figure. :param rows: how many rows will the figure have :param cols: how many colswill the figure have :param prop: Proportion is the proportion to use w/h :return: """ if rows == 1: return self._figsize * cols * self._fig_prop, self._figsize else: return self._figsize * cols * self._fig_prop, self._figsize * rows def _close_figure(self): """Depending on what is disp_images, the figures are displayed or just closed""" if self._disp_images: plt.show() else: plt.close() def getExtent(self, lats, lons, expand_ext=0.0): ''' Obtains the bbox of the coordinates. If included threshold then increases the bbox in all directions with that thres Args: lats: lons: inc_threshold: Returns: ''' minLat = np.amin(lats) - expand_ext maxLat = np.amax(lats) + expand_ext minLon = np.amin(lons) - expand_ext maxLon = np.amax(lons) + expand_ext bbox = (minLon, maxLon, minLat, maxLat) return bbox def xr_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.attrs: print(F"{name} = {getattr(ds, name)}") print("\n========== Dimensions =========") for name in ds.dims: print(F"{name}: {ds[name].shape}") print("\n========== Coordinates =========") for name in ds.coords: print(F"{name}: {ds[name].shape}") print("\n========== Variables =========") for cur_variable_name in ds.variables: cur_var = ds[cur_variable_name] print(F"{cur_variable_name}: {cur_var.dims} {cur_var.shape}") def nc_summary(self, ds): """ Prints a summary of the netcdf (global attributes, variables, etc) :param ds: :return: """ print("\n========== Global attributes =========") for name in ds.ncattrs(): print(F"{name} = {getattr(ds, name)}") print("\n========== Variables =========") netCDFvars = ds.variables for cur_variable_name in netCDFvars.keys(): cur_var = ds.variables[cur_variable_name] print(F"Dimensions for {cur_variable_name}: {cur_var.dimensions} {cur_var.shape}") def add_roads(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states roads = cfeature.NaturalEarthFeature( category='cultural', name='roads', scale='10m', facecolor='none') ax.add_feature(roads, edgecolor='black') return ax def add_states(self, ax): # Names come from: https://www.naturalearthdata.com/features/ # -- Add states states_provinces = cfeature.NaturalEarthFeature( category='cultural', name='admin_1_states_provinces_lines', 
scale='50m', facecolor='none') ax.add_feature(states_provinces, edgecolor='gray') return ax def plot_scatter_data(self, lats=None, lons=None, bbox=None, s=1, c='blue', cmap='plasma', title=''): ''' This function plots points in a map :param bbox: :return: ''' if bbox is None: bbox = (-180, 180, -90, 90) if lats is None: lats = self.lats if lons is None: lons = self.lons fig, ax = plt.subplots(1, 1, figsize=(self._figsize, self._figsize), subplot_kw={'projection': ccrs.PlateCarree()}) ax.set_extent(bbox) # If we do not set this, it will cropp it to the limits of the locations ax.gridlines() im = ax.scatter(lons, lats, s=s, c=c, cmap=cmap) fig.colorbar(im, ax=ax, shrink=0.7) ax.coastlines() plt.title(title) plt.show() def plot_3d_data_npdict(self, np_variables:list, var_names:list, z_levels= [], title='', file_name_prefix='', cmap=None, z_names = [], show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): """ Plots multiple z_levels for multiple fields. It uses rows for each depth, and columns for each variable """ create_folder(self._output_folder) orig_cmap = cmap # If the user do not requires any z-leve, then all are plotted if len(z_levels) == 0: z_levels = range(np_variables[var_names[0]].shape[0]) cols = np.min((self._max_imgs_per_row, len(var_names))) if cols == len(var_names): rows = len(z_levels) else: rows = int(len(z_levels) * np.ceil(len(var_names)/cols)) fig, _axs = plt.subplots(rows, cols, figsize=self.get_proper_size(rows, cols), subplot_kw={'projection': self._projection}) for c_zlevel, c_slice in enumerate(z_levels): # Iterates over the z-levels # Verify the index of the z_levels are the original ones. if len(z_names) != 0: c_slice_txt = z_names[c_slice] else: c_slice_txt = c_slice c_mincbar = np.nan c_maxcbar = np.nan for idx_var, c_var in enumerate(var_names): # Iterate over the fields if rows*cols == 1: # Single figure ax = _axs else: ax = _axs.flatten()[c_zlevel*len(var_names) + idx_var] # Here we chose the min and max colorbars for each field if not(np.all(np.isnan(mincbar))): if type(mincbar) is list: c_mincbar = mincbar[idx_var] else: c_mincbar = mincbar if not(np.all(np.isnan(maxcbar))): if type(mincbar) is list: c_maxcbar = maxcbar[idx_var] else: c_maxcbar = maxcbar # By default we select the colorbar from the name of the variable if self._auto_colormap and orig_cmap is None: cmap = select_colormap(c_var) else: # If there is an array of colormaps we select the one for this field if type(orig_cmap) is list: cmap = orig_cmap[idx_var] else: # If it is just one cmap, then we use it for all the fields cmap = orig_cmap im = self.plot_slice_eoa(np_variables[c_var][c_slice,:,:], ax, cmap=cmap, mode=plot_mode, mincbar=c_mincbar, maxcbar=c_maxcbar) if self._show_var_names: c_title = F'{var_names[idx_var]} {title}' else: c_title = F'{title}' if len(z_levels) > 1: c_title += F"Z - level: {c_slice_txt}" ax.set_title(c_title, fontsize=self._font_size) self.add_colorbar(fig, im, ax, show_color_bar) plt.tight_layout(pad=.5) file_name = F'{file_name_prefix}' pylab.savefig(join(self._output_folder, F'{file_name}.png'), bbox_inches='tight') self._close_figure() def plot_2d_data_xr(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap='viridis', show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data. 
It calls the 'main' function for 3D plotting :param np_variables: :param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): npdict_3d[field_name] = np.expand_dims(np_variables[field_name], axis=0) self.plot_3d_data_npdict(npdict_3d, var_names, z_levels=[0], title=title, file_name_prefix=file_name_prefix, cmap=cmap, z_names = [], show_color_bar=show_color_bar, plot_mode=plot_mode, mincbar=mincbar, maxcbar=maxcbar) def plot_2d_data_np(self, np_variables:list, var_names:list, title='', file_name_prefix='', cmap=None, flip_data=False, rot_90=False, show_color_bar=True, plot_mode=PlotMode.RASTER, mincbar=np.nan, maxcbar=np.nan): ''' Wrapper function to receive raw 2D numpy data. It calls the 'main' function for 3D plotting :param np_variables: Numpy variables. They can be with shape [fields, x, y] or just a single field with shape [x,y] :param var_names: :param title: :param file_name_prefix: :param cmap: :param flip_data: :param rot_90: :param show_color_bar: :param plot_mode: :param mincbar: :param maxcbar: :return: ''' npdict_3d = {} for i, field_name in enumerate(var_names): if len(np_variables.shape) == 3: c_np_data = np_variables[i, :, :] else: c_np_data = np_variables # Single field if rot_90: c_np_data = np.rot90(c_np_data) if flip_data: c_np_data = np.flip(
np.flip(c_np_data)
numpy.flip
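For orientation, a hedged usage sketch of the EOAImageVisualizer class defined in the row above. The random 'sst' field, the grid, and the output folder are illustrative placeholders; select_colormap('sst') resolves to cmocean.cm.thermal via the name match, and the default blue-marble background assumes the bundled image exists under _eoas_pyutils_path.

import numpy as np

lats = np.linspace(-90, 90, 180)
lons = np.linspace(-180, 180, 360)
sst = np.random.rand(len(lats), len(lons))        # placeholder 2D field

viz = EOAImageVisualizer(disp_images=False, output_folder='output',
                         lats=lats, lons=lons)
viz.plot_2d_data_np(sst, var_names=['sst'],       # cmap auto-selected from the name
                    title='Sea surface temperature',
                    file_name_prefix='sst_example')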
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional

import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script

from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
    extract_exif,
    hash_image,
    is_image_truncated,
    open_image,
)


def mad(arr: np.ndarray) -> np.ndarray:
    """Median Absolute Deviation: a "Robust" version of standard deviation.
    Indicates variability of the sample.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    return np.median(np.abs(arr - np.median(arr)))


def named_aggregate_summary(series: pd.Series, key: str) -> dict:
    summary = {
        f"max_{key}":
np.max(series)
numpy.max
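As a worked check of mad() above: with one gross outlier the median absolute deviation barely moves, while the standard deviation is dominated by it, which is the "robust" behaviour the docstring claims.

import numpy as np

arr = np.array([1, 2, 3, 4, 5, 100])                  # one gross outlier
# median = 3.5; absolute deviations = [2.5, 1.5, 0.5, 0.5, 1.5, 96.5]
print(np.median(np.abs(arr - np.median(arr))))        # 1.5, the MAD
print(np.std(arr))                                    # ~36.2, blown up by the outlier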
# ________ # / # \ / # \ / # \/ import random import textwrap import emd_mean import AdvEMDpy import emd_basis import emd_utils import numpy as np import pandas as pd import cvxpy as cvx import seaborn as sns import matplotlib.pyplot as plt from scipy.integrate import odeint from scipy.ndimage import gaussian_filter from emd_utils import time_extension, Utility from scipy.interpolate import CubicSpline from emd_hilbert import Hilbert, hilbert_spectrum from emd_preprocess import Preprocess from emd_mean import Fluctuation from AdvEMDpy import EMD # alternate packages from PyEMD import EMD as pyemd0215 import emd as emd040 sns.set(style='darkgrid') pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001) pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time) pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series) # plot 0 - addition fig = plt.figure(figsize=(9, 4)) ax = plt.subplot(111) plt.gcf().subplots_adjust(bottom=0.10) plt.title('First Iteration of Sifting Algorithm') plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1) plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()], c='r', label=r'$M(t_i)$', zorder=2) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4) plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()], pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()], c='c', label=r'$m(t_j)$', zorder=3) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5) plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5) plt.yticks(ticks=[-2, -1, 0, 1, 2]) plt.xticks(ticks=[0, np.pi, 2 * np.pi], labels=[r'0', r'$\pi$', r'$2\pi$']) box_0 = ax.get_position() ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height]) ax.legend(loc='center left', bbox_to_anchor=(1, 0.5)) plt.savefig('jss_figures/pseudo_algorithm.png') plt.show() knots = np.arange(12) time = np.linspace(0, 11, 1101) basis = emd_basis.Basis(time=time, time_series=time) b_spline_basis = basis.cubic_b_spline(knots) chsi_basis = basis.chsi_basis(knots) # plot 1 plt.title('Non-Natural Cubic B-Spline Bases at Boundary') plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $') plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $') plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $') plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $') plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $') plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $']) plt.xlim(4.4, 6.6) plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-') plt.legend(loc='upper left') plt.savefig('jss_figures/boundary_bases.png') plt.show() # plot 1a - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) knots_uniform = np.linspace(0, 2 * np.pi, 51) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0] fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) 
plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Uniform Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Uniform Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Uniform Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots_uniform)): axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_uniform.png') plt.show() # plot 1b - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=1, verbose=False) fig, axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Statically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Statically Optimised Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Statically Optimised Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots)): axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_1.png') plt.show() # plot 1c - addition knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001) knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time) emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series) imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric', optimise_knots=2, verbose=False) fig, 
axs = plt.subplots(3, 1) fig.subplots_adjust(hspace=0.6) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Time Series and Dynamically Optimised Knots') axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100) axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].set_title('IMF 1 and Dynamically Knots') axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[2].set_title('IMF 2 and Dynamically Knots') axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100) axs[2].set_yticks(ticks=[-2, 0, 2]) axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi]) axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[0].legend(loc='lower left') axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots') for i in range(3): for j in range(1, len(knots[i])): axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey') plt.savefig('jss_figures/knot_2.png') plt.show() # plot 1d - addition window = 81 fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Filtering Demonstration') axs[1].set_title('Zoomed Region') preprocess_time = pseudo_alg_time.copy() np.random.seed(1) random.seed(1) preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time)) for i in random.sample(range(1000), 500): preprocess_time_series[i] += np.random.normal(0, 1) preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series) axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black', label=textwrap.fill('Zoomed region', 10)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black') axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black') axs[0].set_yticks(ticks=[-2, 0, 2]) axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi]) 
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$']) axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12)) axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13)) axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12)) axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize interpolation filter', 14)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey', label=textwrap.fill('Quantile window', 12)) axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey') axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi) axs[1].set_ylim(-3, 3) axs[1].set_yticks(ticks=[-2, 0, 2]) axs[1].set_xticks(ticks=[np.pi]) axs[1].set_xticklabels(labels=[r'$\pi$']) box_0 = axs[0].get_position() axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height]) axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15)) box_1 = axs[1].get_position() axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height]) plt.savefig('jss_figures/preprocess_filter.png') plt.show() # plot 1e - addition fig, axs = plt.subplots(2, 1) fig.subplots_adjust(hspace=0.4) figure_size = plt.gcf().get_size_inches() factor = 0.8 plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1])) plt.gcf().subplots_adjust(bottom=0.10) axs[0].set_title('Preprocess Smoothing Demonstration') axs[1].set_title('Zoomed Region') axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)') axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12)) axs[0].plot(preprocess_time, preprocess.hp()[1], label=textwrap.fill('Hodrick-Prescott smoothing', 12)) axs[0].plot(preprocess_time, preprocess.hw(order=51)[1], label=textwrap.fill('Henderson-Whittaker smoothing', 13)) downsampled_and_decimated = preprocess.downsample() axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1], label=textwrap.fill('Downsampled & decimated', 11)) downsampled = preprocess.downsample(decimate=False) axs[0].plot(downsampled[0], downsampled[1], label=textwrap.fill('Downsampled', 13)) axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 *
np.ones(101)
numpy.ones
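The 'plot 0' figure at the top of the script above illustrates a single sifting iteration. A textbook sketch of that step follows, using SciPy's CubicSpline for the two envelopes; AdvEMDpy's own sifting fits B-splines and applies explicit edge-effect handling, so this simplified version is for intuition only.

import numpy as np
from scipy.interpolate import CubicSpline

t = np.linspace(0, 2 * np.pi, 1001)
h = np.sin(t) + np.sin(5 * t)                           # h_(1,0)(t) from the figure

d = np.diff(h)
max_idx = np.where((d[:-1] > 0) & (d[1:] < 0))[0] + 1   # interior maxima M(t_i)
min_idx = np.where((d[:-1] < 0) & (d[1:] > 0))[0] + 1   # interior minima m(t_j)

upper = CubicSpline(t[max_idx], h[max_idx])(t)          # maxima envelope
lower = CubicSpline(t[min_idx], h[min_idx])(t)          # minima envelope
mean_env = (upper + lower) / 2                          # the purple mean envelope
h_next = h - mean_env                                   # candidate IMF for the next pass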
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates a the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. 
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate from the centre in increasing steps of the pixel-scale. 5) Rotate these radial coordinates by the input `angle` clockwise. A schematric is shown below: ------------------- | | |<- - - - ->x | x = centre | | <-> = longest radial path from centre to extent edge | | ------------------- Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax] centre : (float, flloat) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent in along the positive x-axis. 
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore of shape (total_pixels, 2).

The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the
highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.

The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin
before computing their 1D grid pixel indexes.

Parameters
----------
grid_scaled_2d_slim: np.ndarray
    The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
    The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
    The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
    The (y,x) origin of the grid, which the scaled grid is shifted to.

Returns
-------
ndarray
    A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).

Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])

grid_pixel_centres_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
                                                             pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""

grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))

centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
    shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)

for slim_index in range(grid_scaled_2d_slim.shape[0]):

    grid_pixels_2d_slim[slim_index, 0] = int(
        (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
        + centres_scaled[0]
        + 0.5
    )
    grid_pixels_2d_slim[slim_index, 1] = int(
        (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
        + centres_scaled[1]
        + 0.5
    )

return grid_pixels_2d_slim


@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
    grid_scaled_2d_slim: np.ndarray,
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
    returned as integers such that they index the pixel from the top-left of the 2D grid, going rightwards and then
    downwards.

    The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,) respectively.

    For example, for a grid with 10 pixels per row:

    The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
    The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4.
    The first pixel on the second row, whose native index is [1,0], corresponds to slimmed pixel index 10.

    The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin
    before computing their 1D grid pixel indexes.

    Parameters
    ----------
    grid_scaled_2d_slim: np.ndarray
        The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
    shape_native
        The (y,x) shape of the original 2D array the scaled coordinates were computed on.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.

    Returns
    -------
    ndarray
        A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=shape_native, pixel_scales=pixel_scales, origin=origin, ) grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0]) for slim_index in range(grid_pixels_2d_slim.shape[0]): grid_pixel_indexes_2d_slim[slim_index] = int( grid_pixels_2d_slim[slim_index, 0] * shape_native[1] + grid_pixels_2d_slim[slim_index, 1] ) return grid_pixel_indexes_2d_slim @numba_util.jit() def grid_scaled_2d_slim_from( grid_pixels_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this origin after computing their values from the 1D grid pixel indexes. Parameters ---------- grid_pixels_2d_slim: np.ndarray The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2). Examples -------- grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_scaled_2d_slim[slim_index, 0] = ( -(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5) * pixel_scales[0] ) grid_scaled_2d_slim[slim_index, 1] = ( grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5 ) * pixel_scales[1] return grid_scaled_2d_slim @numba_util.jit() def grid_pixel_centres_2d_from( grid_scaled_2d: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. 
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d: np.ndarray The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2). Examples -------- grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for y in range(grid_scaled_2d.shape[0]): for x in range(grid_scaled_2d.shape[1]): grid_pixels_2d[y, x, 0] = int( (-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d[y, x, 1] = int( (grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d @numba_util.jit() def relocated_grid_via_jit_from(grid, border_grid): """ Relocate the coordinates of a grid to its border if they are outside the border, where the border is defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*). This is performed as follows: 1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2: Compute the radial distance of every grid coordinate from the origin. 3: For every coordinate, find its nearest pixel in the border. 4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired border pixel's radial distance. 5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border (if its inside the border, do nothing). The method can be used on uniform or irregular grids, however for irregular grids the border of the 'image-plane' mask is used to define border pixels. Parameters ---------- grid : Grid2D The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it. border_grid : Grid2D The grid of border (y,x) coordinates. 
""" grid_relocated = np.zeros(grid.shape) grid_relocated[:, :] = grid[:, :] border_origin = np.zeros(2) border_origin[0] = np.mean(border_grid[:, 0]) border_origin[1] = np.mean(border_grid[:, 1]) border_grid_radii = np.sqrt( np.add( np.square(np.subtract(border_grid[:, 0], border_origin[0])), np.square(np.subtract(border_grid[:, 1], border_origin[1])), ) ) border_min_radii = np.min(border_grid_radii) grid_radii = np.sqrt( np.add( np.square(np.subtract(grid[:, 0], border_origin[0])), np.square(np.subtract(grid[:, 1], border_origin[1])), ) ) for pixel_index in range(grid.shape[0]): if grid_radii[pixel_index] > border_min_radii: closest_pixel_index = np.argmin( np.square(grid[pixel_index, 0] - border_grid[:, 0]) + np.square(grid[pixel_index, 1] - border_grid[:, 1]) ) move_factor = ( border_grid_radii[closest_pixel_index] / grid_radii[pixel_index] ) if move_factor < 1.0: grid_relocated[pixel_index, :] = ( move_factor * (grid[pixel_index, :] - border_origin[:]) + border_origin[:] ) return grid_relocated @numba_util.jit() def furthest_grid_2d_slim_index_from( grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float] ) -> int: distance_to_centre = 0.0 for slim_index in slim_indexes: y = grid_2d_slim[slim_index, 0] x = grid_2d_slim[slim_index, 1] distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2 if distance_to_centre_new >= distance_to_centre: distance_to_centre = distance_to_centre_new furthest_grid_2d_slim_index = slim_index return furthest_grid_2d_slim_index def grid_2d_slim_from( grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int ) -> np.ndarray: """ For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked pixels to a slimmed grid of shape [total_unmasked_pixels, 2]. The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such that for an grid of shape (3,3) where all pixels are unmasked: - pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid. - pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid. - pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid. Parameters ---------- grid_2d_native : ndarray The native grid of (y,x) values which are mapped to the slimmed grid. mask_2d A 2D array of bools, where `False` values mean unmasked and are included in the mapping. sub_size The size (sub_size x sub_size) of each unmasked pixels sub-array. Returns ------- ndarray A 1D grid of values mapped from the 2D grid with dimensions (total_unmasked_pixels). """ grid_1d_slim_y = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size ) grid_1d_slim_x = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size ) return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1) def grid_2d_native_from( grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int ) -> np.ndarray: """ For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the native 2D grid where masked values are set to zero. This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked pixels, for example: - If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid. 
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.

Parameters
----------
grid_2d_slim
    The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
    A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
    The size (sub_size x sub_size) of each unmasked pixel's sub-array.

Returns
-------
ndarray
    A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
    grid mapped from the slimmed grid.
"""

grid_2d_native_y = array_2d_util.array_2d_native_from(
    array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)

grid_2d_native_x = array_2d_util.array_2d_native_from(
    array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)

return np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)


@numba_util.jit()
def grid_2d_slim_upscaled_from(
    grid_slim: np.ndarray,
    upscale_factor: int,
    pixel_scales: Union[float, Tuple[float, float]],
) -> np.ndarray:
    """
    From an input slimmed 2D grid, return an upscaled slimmed 2D grid where (y,x) coordinates are added at an
    upscaled resolution to each grid coordinate, analogous to a sub-grid.

    Parameters
    ----------
    grid_slim
        The slimmed grid of (y,x) coordinates over which a square uniform grid is overlaid.
    upscale_factor
        The upscaled resolution at which the new grid coordinates are computed.
    pixel_scales
        The pixel scale of the uniform grid that is laid over the irregular grid of (y,x) coordinates.
    """

    grid_2d_slim_upscaled = np.zeros(
        shape=(grid_slim.shape[0] * upscale_factor ** 2, 2)
    )

    upscale_index = 0

    y_upscale_half = pixel_scales[0] / 2
    y_upscale_step = pixel_scales[0] / upscale_factor

    x_upscale_half = pixel_scales[1] / 2
    x_upscale_step = pixel_scales[1] / upscale_factor

    for slim_index in range(grid_slim.shape[0]):

        y_grid = grid_slim[slim_index, 0]
        x_grid = grid_slim[slim_index, 1]

        for y in range(upscale_factor):
            for x in range(upscale_factor):

                grid_2d_slim_upscaled[upscale_index, 0] = (
                    y_grid
                    + y_upscale_half
                    - y * y_upscale_step
                    - (y_upscale_step / 2.0)
                )
                grid_2d_slim_upscaled[upscale_index, 1] = (
                    x_grid
                    - x_upscale_half
                    + x * x_upscale_step
                    + (x_upscale_step / 2.0)
                )

                upscale_index += 1

    return grid_2d_slim_upscaled


def grid_2d_of_points_within_radius(
    radius: float, centre: Tuple[float, float], grid_2d: np.ndarray
):
    y_inside = []
    x_inside = []

    # Keep only the points whose squared radial distance from the centre is
    # within the squared input radius.
    for i in range(len(grid_2d[:, 0])):
        if (grid_2d[i, 0] - centre[0]) ** 2 + (
            grid_2d[i, 1] - centre[1]
        ) ** 2 < radius ** 2:
            y_inside.append(grid_2d[i, 0])
            x_inside.append(grid_2d[i, 1])

    # Return the retained points as a (total_points, 2) array of (y,x) values.
    return np.stack((np.asarray(y_inside), np.asarray(x_inside)), axis=-1)


def compute_polygon_area(points):
    x = points[:, 1]
    y = points[:, 0]

    return 0.5 * np.abs(np.dot(x,
np.roll(y, 1)
numpy.roll
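For reference, the truncated compute_polygon_area above appears to be the standard shoelace formula, whose second factor is the numpy.roll completion shown. Below is a minimal self-contained sketch; the function name and test values are illustrative, not from the source.

import numpy as np

def polygon_area(points: np.ndarray) -> float:
    """Area of a simple polygon whose vertices are the (y, x) rows of `points`."""
    x = points[:, 1]
    y = points[:, 0]
    # np.roll shifts the vertex array by one, pairing each vertex with its
    # neighbour and implicitly closing the polygon.
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# A unit square has area 1.0.
square = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0], [1.0, 0.0]])
print(polygon_area(square))  # 1.0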
import sys import numpy as np from matplotlib import pyplot as pl from rw import WriteGTiff fn = '../pozo-steep-vegetated-pcl.npy' pts = np.load(fn) x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5] ix = (0.2 * (x - x.min())).astype('int') iy = (0.2 * (y - y.min())).astype('int') shape = (100, 100) xb = np.arange(shape[1]+1) yb = np.arange(shape[0]+1) fg, ax = pl.subplots(ncols = 2, nrows = 2, figsize = (10.24, 10.24), sharex = True, sharey = True) uc = (2, 5) for j in range(len(uc)): print('Class %i' % uc[j]) b = c == uc[j] cx, cy, cz = ix[b], iy[b], z[b] mean =
np.zeros(shape)
numpy.zeros
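The script above is truncated at the per-class mean computation. One plausible way to finish it is to accumulate sums and counts into numpy.zeros rasters and divide; this is an assumption about the author's intent, and the data below is synthetic.

import numpy as np

# Synthetic stand-ins for the script's binned point-cloud columns (ix, iy, z).
rng = np.random.default_rng(0)
ix = rng.integers(0, 100, size=1000)  # x bin index per point
iy = rng.integers(0, 100, size=1000)  # y bin index per point
z = rng.normal(size=1000)             # elevation per point

shape = (100, 100)
total = np.zeros(shape)  # running sum of z per cell
count = np.zeros(shape)  # number of points per cell

# Unbuffered in-place accumulation handles repeated (iy, ix) indices correctly.
np.add.at(total, (iy, ix), z)
np.add.at(count, (iy, ix), 1)

with np.errstate(invalid="ignore"):
    mean = total / count  # NaN where a cell received no points
print(np.nanmean(mean))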
from abc import ABCMeta, abstractmethod import os from vmaf.tools.misc import make_absolute_path, run_process from vmaf.tools.stats import ListStats __copyright__ = "Copyright 2016-2018, Netflix, Inc." __license__ = "Apache, Version 2.0" import re import numpy as np import ast from vmaf import ExternalProgramCaller, to_list from vmaf.config import VmafConfig, VmafExternalConfig from vmaf.core.executor import Executor from vmaf.core.result import Result from vmaf.tools.reader import YuvReader class FeatureExtractor(Executor): """ FeatureExtractor takes in a list of assets, and run feature extraction on them, and return a list of corresponding results. A FeatureExtractor must specify a unique type and version combination (by the TYPE and VERSION attribute), so that the Result generated by it can be identified. A derived class of FeatureExtractor must: 1) Override TYPE and VERSION 2) Override _generate_result(self, asset), which call a command-line executable and generate feature scores in a log file. 3) Override _get_feature_scores(self, asset), which read the feature scores from the log file, and return the scores in a dictionary format. For an example, follow VmafFeatureExtractor. """ __metaclass__ = ABCMeta @property @abstractmethod def ATOM_FEATURES(self): raise NotImplementedError def _read_result(self, asset): result = {} result.update(self._get_feature_scores(asset)) executor_id = self.executor_id return Result(asset, executor_id, result) @classmethod def get_scores_key(cls, atom_feature): return "{type}_{atom_feature}_scores".format( type=cls.TYPE, atom_feature=atom_feature) @classmethod def get_score_key(cls, atom_feature): return "{type}_{atom_feature}_score".format( type=cls.TYPE, atom_feature=atom_feature) def _get_feature_scores(self, asset): # routine to read the feature scores from the log file, and return # the scores in a dictionary format. log_file_path = self._get_log_file_path(asset) atom_feature_scores_dict = {} atom_feature_idx_dict = {} for atom_feature in self.ATOM_FEATURES: atom_feature_scores_dict[atom_feature] = [] atom_feature_idx_dict[atom_feature] = 0 with open(log_file_path, 'rt') as log_file: for line in log_file.readlines(): for atom_feature in self.ATOM_FEATURES: re_template = "{af}: ([0-9]+) ([a-zA-Z0-9.-]+)".format(af=atom_feature) mo = re.match(re_template, line) if mo: cur_idx = int(mo.group(1)) assert cur_idx == atom_feature_idx_dict[atom_feature] # parse value, allowing NaN and inf val = float(mo.group(2)) if np.isnan(val) or np.isinf(val): val = None atom_feature_scores_dict[atom_feature].append(val) atom_feature_idx_dict[atom_feature] += 1 continue len_score = len(atom_feature_scores_dict[self.ATOM_FEATURES[0]]) assert len_score != 0 for atom_feature in self.ATOM_FEATURES[1:]: assert len_score == len(atom_feature_scores_dict[atom_feature]), \ "Feature data possibly corrupt. Run cleanup script and try again." 
feature_result = {} for atom_feature in self.ATOM_FEATURES: scores_key = self.get_scores_key(atom_feature) feature_result[scores_key] = atom_feature_scores_dict[atom_feature] return feature_result class VmafFeatureExtractor(FeatureExtractor): TYPE = "VMAF_feature" # VERSION = '0.1' # vmaf_study; Anush's VIF fix # VERSION = '0.2' # expose vif_num, vif_den, adm_num, adm_den, anpsnr # VERSION = '0.2.1' # expose vif num/den of each scale # VERSION = '0.2.2' # adm abs-->fabs, corrected border handling, uniform reading with option of offset for input YUV, updated VIF corner case # VERSION = '0.2.2b' # expose adm_den/num_scalex # VERSION = '0.2.3' # AVX for VMAF convolution; update adm features by folding noise floor into per coef # VERSION = '0.2.4' # Fix a bug in adm feature passing scale into dwt_quant_step # VERSION = '0.2.4b' # Modify by adding ADM noise floor outside cube root; add derived feature motion2 VERSION = '0.2.4c' # Modify by moving motion2 to c code ATOM_FEATURES = ['vif', 'adm', 'ansnr', 'motion', 'motion2', 'vif_num', 'vif_den', 'adm_num', 'adm_den', 'anpsnr', 'vif_num_scale0', 'vif_den_scale0', 'vif_num_scale1', 'vif_den_scale1', 'vif_num_scale2', 'vif_den_scale2', 'vif_num_scale3', 'vif_den_scale3', 'adm_num_scale0', 'adm_den_scale0', 'adm_num_scale1', 'adm_den_scale1', 'adm_num_scale2', 'adm_den_scale2', 'adm_num_scale3', 'adm_den_scale3', ] DERIVED_ATOM_FEATURES = ['vif_scale0', 'vif_scale1', 'vif_scale2', 'vif_scale3', 'vif2', 'adm2', 'adm3', 'adm_scale0', 'adm_scale1', 'adm_scale2', 'adm_scale3', ] ADM2_CONSTANT = 0 ADM_SCALE_CONSTANT = 0 def _generate_result(self, asset): # routine to call the command-line executable and generate feature # scores in the log file. quality_width, quality_height = asset.quality_width_height log_file_path = self._get_log_file_path(asset) yuv_type=self._get_workfile_yuv_type(asset) ref_path=asset.ref_workfile_path dis_path=asset.dis_workfile_path w=quality_width h=quality_height logger = self.logger ExternalProgramCaller.call_vmaf_feature(yuv_type, ref_path, dis_path, w, h, log_file_path, logger) @classmethod def _post_process_result(cls, result): # override Executor._post_process_result result = super(VmafFeatureExtractor, cls)._post_process_result(result) # adm2 = # (adm_num + ADM2_CONSTANT) / (adm_den + ADM2_CONSTANT) adm2_scores_key = cls.get_scores_key('adm2') adm_num_scores_key = cls.get_scores_key('adm_num') adm_den_scores_key = cls.get_scores_key('adm_den') result.result_dict[adm2_scores_key] = list( (np.array(result.result_dict[adm_num_scores_key]) + cls.ADM2_CONSTANT) / (np.array(result.result_dict[adm_den_scores_key]) + cls.ADM2_CONSTANT) ) # vif_scalei = vif_num_scalei / vif_den_scalei, i = 0, 1, 2, 3 vif_num_scale0_scores_key = cls.get_scores_key('vif_num_scale0') vif_den_scale0_scores_key = cls.get_scores_key('vif_den_scale0') vif_num_scale1_scores_key = cls.get_scores_key('vif_num_scale1') vif_den_scale1_scores_key = cls.get_scores_key('vif_den_scale1') vif_num_scale2_scores_key = cls.get_scores_key('vif_num_scale2') vif_den_scale2_scores_key = cls.get_scores_key('vif_den_scale2') vif_num_scale3_scores_key = cls.get_scores_key('vif_num_scale3') vif_den_scale3_scores_key = cls.get_scores_key('vif_den_scale3') vif_scale0_scores_key = cls.get_scores_key('vif_scale0') vif_scale1_scores_key = cls.get_scores_key('vif_scale1') vif_scale2_scores_key = cls.get_scores_key('vif_scale2') vif_scale3_scores_key = cls.get_scores_key('vif_scale3') result.result_dict[vif_scale0_scores_key] = list( 
(np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) ) result.result_dict[vif_scale1_scores_key] = list( (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) ) result.result_dict[vif_scale2_scores_key] = list( (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) ) result.result_dict[vif_scale3_scores_key] = list( (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) # vif2 = # ((vif_num_scale0 / vif_den_scale0) + (vif_num_scale1 / vif_den_scale1) + # (vif_num_scale2 / vif_den_scale2) + (vif_num_scale3 / vif_den_scale3)) / 4.0 vif_scores_key = cls.get_scores_key('vif2') result.result_dict[vif_scores_key] = list( ( (np.array(result.result_dict[vif_num_scale0_scores_key]) / np.array(result.result_dict[vif_den_scale0_scores_key])) + (np.array(result.result_dict[vif_num_scale1_scores_key]) / np.array(result.result_dict[vif_den_scale1_scores_key])) + (np.array(result.result_dict[vif_num_scale2_scores_key]) / np.array(result.result_dict[vif_den_scale2_scores_key])) + (np.array(result.result_dict[vif_num_scale3_scores_key]) / np.array(result.result_dict[vif_den_scale3_scores_key])) ) / 4.0 ) # adm_scalei = adm_num_scalei / adm_den_scalei, i = 0, 1, 2, 3 adm_num_scale0_scores_key = cls.get_scores_key('adm_num_scale0') adm_den_scale0_scores_key = cls.get_scores_key('adm_den_scale0') adm_num_scale1_scores_key = cls.get_scores_key('adm_num_scale1') adm_den_scale1_scores_key = cls.get_scores_key('adm_den_scale1') adm_num_scale2_scores_key = cls.get_scores_key('adm_num_scale2') adm_den_scale2_scores_key = cls.get_scores_key('adm_den_scale2') adm_num_scale3_scores_key = cls.get_scores_key('adm_num_scale3') adm_den_scale3_scores_key = cls.get_scores_key('adm_den_scale3') adm_scale0_scores_key = cls.get_scores_key('adm_scale0') adm_scale1_scores_key = cls.get_scores_key('adm_scale1') adm_scale2_scores_key = cls.get_scores_key('adm_scale2') adm_scale3_scores_key = cls.get_scores_key('adm_scale3') result.result_dict[adm_scale0_scores_key] = list( (np.array(result.result_dict[adm_num_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale0_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale1_scores_key] = list( (np.array(result.result_dict[adm_num_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) / (np.array(result.result_dict[adm_den_scale1_scores_key]) + cls.ADM_SCALE_CONSTANT) ) result.result_dict[adm_scale2_scores_key] = list( (np.array(result.result_dict[adm_num_scale2_scores_key]) + cls.ADM_SCALE_CONSTANT) / (
np.array(result.result_dict[adm_den_scale2_scores_key])
numpy.array
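The numpy.array completion above plugs into the elementwise ratio pattern used throughout _post_process_result: each score list becomes an array, the constant is added, and the ratio is converted back to a list. A minimal sketch with invented per-frame scores:

import numpy as np

# Toy per-frame numerator/denominator lists standing in for result_dict
# entries such as adm_num_scale2 / adm_den_scale2; the values are invented.
ADM_SCALE_CONSTANT = 0  # mirrors the class attribute above

num_scores = [0.91, 0.87, 0.93]
den_scores = [1.00, 0.95, 0.98]

adm_scale2 = list(
    (np.array(num_scores) + ADM_SCALE_CONSTANT)
    / (np.array(den_scores) + ADM_SCALE_CONSTANT)
)
print(adm_scale2)  # one derived score per frame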
import numpy as np from typing import Tuple, Union, Optional from autoarray.structures.arrays.two_d import array_2d_util from autoarray.geometry import geometry_util from autoarray import numba_util from autoarray.mask import mask_2d_util @numba_util.jit() def grid_2d_centre_from(grid_2d_slim: np.ndarray) -> Tuple[float, float]: """ Returns the centre of a grid from a 1D grid. Parameters ---------- grid_2d_slim The 1D grid of values which are mapped to a 2D array. Returns ------- (float, float) The (y,x) central coordinates of the grid. """ centre_y = (np.max(grid_2d_slim[:, 0]) + np.min(grid_2d_slim[:, 0])) / 2.0 centre_x = (np.max(grid_2d_slim[:, 1]) + np.min(grid_2d_slim[:, 1])) / 2.0 return centre_y, centre_x @numba_util.jit() def grid_2d_slim_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates a the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned on an array of shape (total_unmasked_pixels*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked coordinates are therefore removed and not included in the slimmed grid. Grid2D are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A slimmed sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_unmasked_pixels*sub_size**2, 2). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_slim = grid_2d_slim_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ total_sub_pixels = mask_2d_util.total_sub_pixels_2d_from(mask_2d, sub_size) grid_slim = np.zeros(shape=(total_sub_pixels, 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=mask_2d.shape, pixel_scales=pixel_scales, origin=origin ) sub_index = 0 y_sub_half = pixel_scales[0] / 2 y_sub_step = pixel_scales[0] / (sub_size) x_sub_half = pixel_scales[1] / 2 x_sub_step = pixel_scales[1] / (sub_size) for y in range(mask_2d.shape[0]): for x in range(mask_2d.shape[1]): if not mask_2d[y, x]: y_scaled = (y - centres_scaled[0]) * pixel_scales[0] x_scaled = (x - centres_scaled[1]) * pixel_scales[1] for y1 in range(sub_size): for x1 in range(sub_size): grid_slim[sub_index, 0] = -( y_scaled - y_sub_half + y1 * y_sub_step + (y_sub_step / 2.0) ) grid_slim[sub_index, 1] = ( x_scaled - x_sub_half + x1 * x_sub_step + (x_sub_step / 2.0) ) sub_index += 1 return grid_slim def grid_2d_via_mask_from( mask_2d: np.ndarray, pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Masked pixels are given values (0.0, 0.0). Grids are defined from the top-left corner, where the first unmasked sub-pixel corresponds to index 0. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- mask_2d A 2D array of bools, where `False` values are unmasked and therefore included as part of the calculated sub-grid. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). 
Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d = grid_2d_via_mask_from(mask=mask, pixel_scales=(0.5, 0.5), sub_size=1, origin=(0.0, 0.0)) """ grid_2d_slim = grid_2d_slim_via_mask_from( mask_2d=mask_2d, pixel_scales=pixel_scales, sub_size=sub_size, origin=origin ) return grid_2d_native_from( grid_2d_slim=grid_2d_slim, mask_2d=mask_2d, sub_size=sub_size ) def grid_2d_slim_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its slimmed dimensions with shape (total_pixels**2*sub_size**2, 2). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grid2D are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid is slimmed and has dimensions (total_unmasked_pixels*sub_size**2, 2). Examples -------- mask = np.array([[True, False, True], [False, False, False] [True, False, True]]) grid_2d_slim = grid_2d_slim_via_shape_native_from(shape_native=(3,3), pixel_scales=(0.5, 0.5), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_slim_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) def grid_2d_via_shape_native_from( shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ For a sub-grid, every unmasked pixel of its 2D mask with shape (total_y_pixels, total_x_pixels) is divided into a finer uniform grid of shape (total_y_pixels*sub_size, total_x_pixels*sub_size). This routine computes the (y,x) scaled coordinates at the centre of every sub-pixel defined by this 2D mask array. The sub-grid is returned in its native dimensions with shape (total_y_pixels*sub_size, total_x_pixels*sub_size). y coordinates are stored in the 0 index of the second dimension, x coordinates in the 1 index. Grids are defined from the top-left corner, where the first sub-pixel corresponds to index [0,0]. Sub-pixels that are part of the same mask array pixel are indexed next to one another, such that the second sub-pixel in the first pixel has index 1, its next sub-pixel has index 2, and so forth. Parameters ---------- shape_native The (y,x) shape of the 2D array the sub-grid of coordinates is computed for. 
pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. origin : (float, flloat) The (y,x) origin of the 2D array, which the sub-grid is shifted around. Returns ------- ndarray A sub grid of (y,x) scaled coordinates at the centre of every pixel unmasked pixel on the 2D mask array. The sub grid array has dimensions (total_y_pixels*sub_size, total_x_pixels*sub_size). Examples -------- grid_2d = grid_2d_via_shape_native_from(shape_native=(3, 3), pixel_scales=(1.0, 1.0), sub_size=2, origin=(0.0, 0.0)) """ return grid_2d_via_mask_from( mask_2d=np.full(fill_value=False, shape=shape_native), pixel_scales=pixel_scales, sub_size=sub_size, origin=origin, ) @numba_util.jit() def grid_scaled_2d_slim_radial_projected_from( extent: np.ndarray, centre: Tuple[float, float], pixel_scales: Union[float, Tuple[float, float]], sub_size: int, shape_slim: Optional[int] = 0, ) -> np.ndarray: """ Determine a projected radial grid of points from a 2D region of coordinates defined by an extent [xmin, xmax, ymin, ymax] and with a (y,x) centre. This functions operates as follows: 1) Given the region defined by the extent [xmin, xmax, ymin, ymax], the algorithm finds the longest 1D distance of the 4 paths from the (y,x) centre to the edge of the region (e.g. following the positive / negative y and x axes). 2) Use the pixel-scale corresponding to the direction chosen (e.g. if the positive x-axis was the longest, the pixel_scale in the x dimension is used). 3) Determine the number of pixels between the centre and the edge of the region using the longest path between the two chosen above. 4) Create a (y,x) grid of radial points where all points are at the centre's y value = 0.0 and the x values iterate from the centre in increasing steps of the pixel-scale. 5) Rotate these radial coordinates by the input `angle` clockwise. A schematric is shown below: ------------------- | | |<- - - - ->x | x = centre | | <-> = longest radial path from centre to extent edge | | ------------------- Using the centre x above, this function finds the longest radial path to the edge of the extent window. The returned `grid_radii` represents a radial set of points that in 1D sample the 2D grid outwards from its centre. This grid stores the radial coordinates as (y,x) values (where all y values are the same) as opposed to a 1D data structure so that it can be used in functions which require that a 2D grid structure is input. Parameters ---------- extent The extent of the grid the radii grid is computed using, with format [xmin, xmax, ymin, ymax] centre : (float, flloat) The (y,x) central coordinate which the radial grid is traced outwards from. pixel_scales The (y,x) scaled units to pixel units conversion factor of the 2D mask array. sub_size The size of the sub-grid that each pixel of the 2D mask array is divided into. shape_slim Manually choose the shape of the 1D projected grid that is returned. If 0, the border based on the 2D grid is used (due to numba None cannot be used as a default value). Returns ------- ndarray A radial set of points sampling the longest distance from the centre to the edge of the extent in along the positive x-axis. 
""" distance_to_positive_x = extent[1] - centre[1] distance_to_positive_y = extent[3] - centre[0] distance_to_negative_x = centre[1] - extent[0] distance_to_negative_y = centre[0] - extent[2] scaled_distance = max( [ distance_to_positive_x, distance_to_positive_y, distance_to_negative_x, distance_to_negative_y, ] ) if (scaled_distance == distance_to_positive_y) or ( scaled_distance == distance_to_negative_y ): pixel_scale = pixel_scales[0] else: pixel_scale = pixel_scales[1] if shape_slim == 0: shape_slim = sub_size * int((scaled_distance / pixel_scale)) + 1 grid_scaled_2d_slim_radii = np.zeros((shape_slim, 2)) grid_scaled_2d_slim_radii[:, 0] += centre[0] radii = centre[1] for slim_index in range(shape_slim): grid_scaled_2d_slim_radii[slim_index, 1] = radii radii += pixel_scale / sub_size return grid_scaled_2d_slim_radii @numba_util.jit() def grid_pixels_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2d (y,x) scaled coordinates to a slimmed grid of 2d (y,x) pixel coordinate values. Pixel coordinates are returned as floats such that they include the decimal offset from each pixel's top-left corner relative to the input scaled coordinate. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled grid is defined by an origin and coordinates are shifted to this origin before computing their 1D grid pixel coordinate values. Parameters ---------- grid_scaled_2d_slim: np.ndarray The slimmed grid of 2D (y,x) coordinates in scaled units which are converted to pixel value coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted to. Returns ------- ndarray A slimmed grid of 2D (y,x) pixel-value coordinates with dimensions (total_pixels, 2). Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_pixels_2d_slim[slim_index, 0] = ( (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d_slim[slim_index, 1] = ( (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d_slim @numba_util.jit() def grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. 
The input and output grids are both slimmed and therefore of shape (total_pixels, 2).

The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the
highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the grid.

The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin
before computing their 1D grid pixel indexes.

Parameters
----------
grid_scaled_2d_slim: np.ndarray
    The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes.
shape_native
    The (y,x) shape of the original 2D array the scaled coordinates were computed on.
pixel_scales
    The (y,x) scaled units to pixel units conversion factor of the original 2D array.
origin : (float, float)
    The (y,x) origin of the grid, which the scaled grid is shifted to.

Returns
-------
ndarray
    A slimmed grid of 2D (y,x) pixel indexes with dimensions (total_pixels, 2).

Examples
--------
grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])

grid_pixel_centres_2d_slim = grid_pixel_centres_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2),
                                                             pixel_scales=(0.5, 0.5), origin=(0.0, 0.0))
"""

grid_pixels_2d_slim = np.zeros((grid_scaled_2d_slim.shape[0], 2))

centres_scaled = geometry_util.central_scaled_coordinate_2d_from(
    shape_native=shape_native, pixel_scales=pixel_scales, origin=origin
)

for slim_index in range(grid_scaled_2d_slim.shape[0]):

    grid_pixels_2d_slim[slim_index, 0] = int(
        (-grid_scaled_2d_slim[slim_index, 0] / pixel_scales[0])
        + centres_scaled[0]
        + 0.5
    )
    grid_pixels_2d_slim[slim_index, 1] = int(
        (grid_scaled_2d_slim[slim_index, 1] / pixel_scales[1])
        + centres_scaled[1]
        + 0.5
    )

return grid_pixels_2d_slim


@numba_util.jit()
def grid_pixel_indexes_2d_slim_from(
    grid_scaled_2d_slim: np.ndarray,
    shape_native: Tuple[int, int],
    pixel_scales: Union[float, Tuple[float, float]],
    origin: Tuple[float, float] = (0.0, 0.0),
) -> np.ndarray:
    """
    Convert a slimmed grid of 2D (y,x) scaled coordinates to a slimmed grid of pixel indexes. Pixel coordinates are
    returned as integers such that they index the pixel from the top-left of the 2D grid, going rightwards and then
    downwards.

    The input and output grids are both slimmed and have shapes (total_pixels, 2) and (total_pixels,) respectively.

    For example, for a grid with 10 pixels per row:

    The pixel at the top-left, whose native index is [0,0], corresponds to slimmed pixel index 0.
    The fifth pixel on the top row, whose native index is [0,4], corresponds to slimmed pixel index 4.
    The first pixel on the second row, whose native index is [1,0], corresponds to slimmed pixel index 10.

    The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin
    before computing their 1D grid pixel indexes.

    Parameters
    ----------
    grid_scaled_2d_slim: np.ndarray
        The slimmed grid of 2D (y,x) coordinates in scaled units which is converted to slimmed pixel indexes.
    shape_native
        The (y,x) shape of the original 2D array the scaled coordinates were computed on.
    pixel_scales
        The (y,x) scaled units to pixel units conversion factor of the original 2D array.
    origin : (float, float)
        The (y,x) origin of the grid, which the scaled grid is shifted to.

    Returns
    -------
    ndarray
        A grid of slimmed pixel indexes with dimensions (total_pixels,).
Examples -------- grid_scaled_2d_slim = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_indexes_2d_slim = grid_pixel_indexes_2d_slim_from(grid_scaled_2d_slim=grid_scaled_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d_slim = grid_pixel_centres_2d_slim_from( grid_scaled_2d_slim=grid_scaled_2d_slim, shape_native=shape_native, pixel_scales=pixel_scales, origin=origin, ) grid_pixel_indexes_2d_slim = np.zeros(grid_pixels_2d_slim.shape[0]) for slim_index in range(grid_pixels_2d_slim.shape[0]): grid_pixel_indexes_2d_slim[slim_index] = int( grid_pixels_2d_slim[slim_index, 0] * shape_native[1] + grid_pixels_2d_slim[slim_index, 1] ) return grid_pixel_indexes_2d_slim @numba_util.jit() def grid_scaled_2d_slim_from( grid_pixels_2d_slim: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a slimmed grid of 2D (y,x) pixel coordinates to a slimmed grid of 2D (y,x) scaled values. The input and output grids are both slimmed and therefore shape (total_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. The scaled coordinate origin is defined by the class attribute origin, and coordinates are shifted to this origin after computing their values from the 1D grid pixel indexes. Parameters ---------- grid_pixels_2d_slim: np.ndarray The slimmed grid of (y,x) coordinates in pixel values which is converted to scaled coordinates. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted. Returns ------- ndarray A slimmed grid of 2d scaled coordinates with dimensions (total_pixels, 2). Examples -------- grid_pixels_2d_slim = np.array([[0,0], [0,1], [1,0], [1,1]) grid_pixels_2d_slim = grid_scaled_2d_slim_from(grid_pixels_2d_slim=grid_pixels_2d_slim, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_scaled_2d_slim = np.zeros((grid_pixels_2d_slim.shape[0], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for slim_index in range(grid_scaled_2d_slim.shape[0]): grid_scaled_2d_slim[slim_index, 0] = ( -(grid_pixels_2d_slim[slim_index, 0] - centres_scaled[0] - 0.5) * pixel_scales[0] ) grid_scaled_2d_slim[slim_index, 1] = ( grid_pixels_2d_slim[slim_index, 1] - centres_scaled[1] - 0.5 ) * pixel_scales[1] return grid_scaled_2d_slim @numba_util.jit() def grid_pixel_centres_2d_from( grid_scaled_2d: np.ndarray, shape_native: Tuple[int, int], pixel_scales: Union[float, Tuple[float, float]], origin: Tuple[float, float] = (0.0, 0.0), ) -> np.ndarray: """ Convert a native grid of 2D (y,x) scaled coordinates to a native grid of 2D (y,x) pixel values. Pixel coordinates are returned as integers such that they map directly to the pixel they are contained within. The input and output grids are both native resolution and therefore have shape (y_pixels, x_pixels, 2). The pixel coordinate origin is at the top left corner of the grid, such that the pixel [0,0] corresponds to the highest (most positive) y scaled coordinate and lowest (most negative) x scaled coordinate on the gird. 
The scaled coordinate grid is defined by the class attribute origin, and coordinates are shifted to this origin before computing their 1D grid pixel indexes. Parameters ---------- grid_scaled_2d: np.ndarray The native grid of 2D (y,x) coordinates in scaled units which is converted to pixel indexes. shape_native The (y,x) shape of the original 2D array the scaled coordinates were computed on. pixel_scales The (y,x) scaled units to pixel units conversion factor of the original 2D array. origin : (float, flloat) The (y,x) origin of the grid, which the scaled grid is shifted Returns ------- ndarray A native grid of 2D (y,x) pixel indexes with dimensions (y_pixels, x_pixels, 2). Examples -------- grid_scaled_2d = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]]) grid_pixel_centres_2d = grid_pixel_centres_2d_from(grid_scaled_2d=grid_scaled_2d, shape=(2,2), pixel_scales=(0.5, 0.5), origin=(0.0, 0.0)) """ grid_pixels_2d = np.zeros((grid_scaled_2d.shape[0], grid_scaled_2d.shape[1], 2)) centres_scaled = geometry_util.central_scaled_coordinate_2d_from( shape_native=shape_native, pixel_scales=pixel_scales, origin=origin ) for y in range(grid_scaled_2d.shape[0]): for x in range(grid_scaled_2d.shape[1]): grid_pixels_2d[y, x, 0] = int( (-grid_scaled_2d[y, x, 0] / pixel_scales[0]) + centres_scaled[0] + 0.5 ) grid_pixels_2d[y, x, 1] = int( (grid_scaled_2d[y, x, 1] / pixel_scales[1]) + centres_scaled[1] + 0.5 ) return grid_pixels_2d @numba_util.jit() def relocated_grid_via_jit_from(grid, border_grid): """ Relocate the coordinates of a grid to its border if they are outside the border, where the border is defined as all pixels at the edge of the grid's mask (see *mask._border_1d_indexes*). This is performed as follows: 1: Use the mean value of the grid's y and x coordinates to determine the origin of the grid. 2: Compute the radial distance of every grid coordinate from the origin. 3: For every coordinate, find its nearest pixel in the border. 4: Determine if it is outside the border, by comparing its radial distance from the origin to its paired border pixel's radial distance. 5: If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border (if its inside the border, do nothing). The method can be used on uniform or irregular grids, however for irregular grids the border of the 'image-plane' mask is used to define border pixels. Parameters ---------- grid : Grid2D The grid (uniform or irregular) whose pixels are to be relocated to the border edge if outside it. border_grid : Grid2D The grid of border (y,x) coordinates. 
""" grid_relocated = np.zeros(grid.shape) grid_relocated[:, :] = grid[:, :] border_origin = np.zeros(2) border_origin[0] = np.mean(border_grid[:, 0]) border_origin[1] = np.mean(border_grid[:, 1]) border_grid_radii = np.sqrt( np.add( np.square(np.subtract(border_grid[:, 0], border_origin[0])), np.square(np.subtract(border_grid[:, 1], border_origin[1])), ) ) border_min_radii = np.min(border_grid_radii) grid_radii = np.sqrt( np.add( np.square(np.subtract(grid[:, 0], border_origin[0])), np.square(np.subtract(grid[:, 1], border_origin[1])), ) ) for pixel_index in range(grid.shape[0]): if grid_radii[pixel_index] > border_min_radii: closest_pixel_index = np.argmin( np.square(grid[pixel_index, 0] - border_grid[:, 0]) + np.square(grid[pixel_index, 1] - border_grid[:, 1]) ) move_factor = ( border_grid_radii[closest_pixel_index] / grid_radii[pixel_index] ) if move_factor < 1.0: grid_relocated[pixel_index, :] = ( move_factor * (grid[pixel_index, :] - border_origin[:]) + border_origin[:] ) return grid_relocated @numba_util.jit() def furthest_grid_2d_slim_index_from( grid_2d_slim: np.ndarray, slim_indexes: np.ndarray, coordinate: Tuple[float, float] ) -> int: distance_to_centre = 0.0 for slim_index in slim_indexes: y = grid_2d_slim[slim_index, 0] x = grid_2d_slim[slim_index, 1] distance_to_centre_new = (x - coordinate[1]) ** 2 + (y - coordinate[0]) ** 2 if distance_to_centre_new >= distance_to_centre: distance_to_centre = distance_to_centre_new furthest_grid_2d_slim_index = slim_index return furthest_grid_2d_slim_index def grid_2d_slim_from( grid_2d_native: np.ndarray, mask: np.ndarray, sub_size: int ) -> np.ndarray: """ For a native 2D grid and mask of shape [total_y_pixels, total_x_pixels, 2], map the values of all unmasked pixels to a slimmed grid of shape [total_unmasked_pixels, 2]. The pixel coordinate origin is at the top left corner of the native grid and goes right-wards and downwards, such that for an grid of shape (3,3) where all pixels are unmasked: - pixel [0,0] of the 2D grid will correspond to index 0 of the 1D grid. - pixel [0,1] of the 2D grid will correspond to index 1 of the 1D grid. - pixel [1,0] of the 2D grid will correspond to index 4 of the 1D grid. Parameters ---------- grid_2d_native : ndarray The native grid of (y,x) values which are mapped to the slimmed grid. mask_2d A 2D array of bools, where `False` values mean unmasked and are included in the mapping. sub_size The size (sub_size x sub_size) of each unmasked pixels sub-array. Returns ------- ndarray A 1D grid of values mapped from the 2D grid with dimensions (total_unmasked_pixels). """ grid_1d_slim_y = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 0], mask_2d=mask, sub_size=sub_size ) grid_1d_slim_x = array_2d_util.array_2d_slim_from( array_2d_native=grid_2d_native[:, :, 1], mask_2d=mask, sub_size=sub_size ) return np.stack((grid_1d_slim_y, grid_1d_slim_x), axis=-1) def grid_2d_native_from( grid_2d_slim: np.ndarray, mask_2d: np.ndarray, sub_size: int ) -> np.ndarray: """ For a slimmed 2D grid of shape [total_unmasked_pixels, 2], that was computed by extracting the unmasked values from a native 2D grid of shape [total_y_pixels, total_x_pixels, 2], map the slimmed grid's coordinates back to the native 2D grid where masked values are set to zero. This uses a 1D array 'slim_to_native' where each index gives the 2D pixel indexes of the grid's native unmasked pixels, for example: - If slim_to_native[0] = [0,0], the first value of the 1D array maps to the pixels [0,0,:] of the native 2D grid. 
- If slim_to_native[1] = [0,1], the second value of the 1D array maps to the pixels [0,1,:] of the native 2D grid.
- If slim_to_native[4] = [1,1], the fifth value of the 1D array maps to the pixels [1,1,:] of the native 2D grid.

Parameters
----------
grid_2d_slim
    The (y,x) values of the slimmed 2D grid which are mapped to the native 2D grid.
mask_2d
    A 2D array of bools, where `False` values mean unmasked and are included in the mapping.
sub_size
    The size (sub_size x sub_size) of each unmasked pixel's sub-array.

Returns
-------
ndarray
    A NumPy array of shape [total_y_pixels, total_x_pixels, 2] corresponding to the (y,x) values of the native 2D
    grid mapped from the slimmed grid.
"""

grid_2d_native_y = array_2d_util.array_2d_native_from(
    array_2d_slim=grid_2d_slim[:, 0], mask_2d=mask_2d, sub_size=sub_size
)

grid_2d_native_x = array_2d_util.array_2d_native_from(
    array_2d_slim=grid_2d_slim[:, 1], mask_2d=mask_2d, sub_size=sub_size
)

return
np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
numpy.stack
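The numpy.stack completion recombines the separately mapped y and x components into one native (y,x) grid. A small sketch with invented coordinate values:

import numpy as np

# The y and x components of a 2x2 native grid, as array_2d_native_from
# would return them one coordinate at a time (values invented).
grid_2d_native_y = np.array([[1.0, 1.0], [-1.0, -1.0]])
grid_2d_native_x = np.array([[-1.0, 1.0], [-1.0, 1.0]])

# Stacking on the last axis interleaves them into one (y, x) pair per pixel.
grid_2d_native = np.stack((grid_2d_native_y, grid_2d_native_x), axis=-1)
print(grid_2d_native.shape)  # (2, 2, 2)
print(grid_2d_native[0, 1])  # [1. 1.] -> the (y, x) coordinate of pixel [0, 1]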
# -*- encoding:utf-8 -*- # @Time : 2021/1/3 15:15 # @Author : gfjiang import os.path as osp import mmcv import numpy as np import cvtools import matplotlib.pyplot as plt import cv2.cv2 as cv from functools import partial import torch import math from cvtools.utils.path import add_prefix_filename_suffix from mmdet.ops import nms from mmdet.apis import init_detector, inference_detector def draw_features(module, input, output, work_dir='./'): x = output.cpu().numpy() out_channels = list(output.shape)[1] height = int(math.sqrt(out_channels)) width = height if list(output.shape)[2] < 128: return fig = plt.figure(figsize=(32, 32)) fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05) for i in range(height * width): plt.subplot(height, width, i + 1) plt.axis('off') img = x[0, i, :, :] pmin = np.min(img) pmax = np.max(img) img = ((img - pmin) / (pmax - pmin + 0.000001))*255 # float在[0,1]之间,转换成0-255 img = img.astype(np.uint8) # 转成unit8 img = cv.applyColorMap(img, cv.COLORMAP_JET) # 生成heat map img = img[:, :, ::-1] # 注意cv2(BGR)和matplotlib(RGB)通道是相反的 plt.imshow(img) # print("{}/{}".format(i,width*height)) savename = get_image_name_for_hook(module, work_dir) fig.savefig(savename, dpi=100) fig.clf() plt.close() def get_image_name_for_hook(module, work_dir='./'): """ Generate image filename for hook function Parameters: ----------- module: module of neural network """ # os.makedirs(work_dir, exist_ok=True) module_name = str(module) base_name = module_name.split('(')[0] index = 0 image_name = '.' # '.' is surely exist, to make first loop condition True while osp.exists(image_name): index += 1 image_name = osp.join( work_dir, 'feats', '%s_%d.png' % (base_name, index)) return image_name class AerialDetectionOBB(object): def __init__(self, config, pth): self.imgs = [] self.cfg = mmcv.Config.fromfile(config) self.pth = pth print('loading model {} ...'.format(pth)) self.model = init_detector(self.cfg, self.pth, device='cuda:0') self.results = [] self.img_detected = [] # self.vis_feats((torch.nn.Conv2d, torch.nn.MaxPool2d)) def __call__(self, imgs_or_path, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): if isinstance(imgs_or_path, str): self.imgs += cvtools.get_files_list(imgs_or_path) else: self.imgs += imgs_or_path prog_bar = mmcv.ProgressBar(len(self.imgs)) for _, img in enumerate(self.imgs): self.detect(img, det_thrs=det_thrs, vis=vis, vis_thr=vis_thr, save_root=save_root) prog_bar.update() def detect(self, img, det_thrs=0.5, vis=False, vis_thr=0.5, save_root=''): result = inference_detector(self.model, img) # result = self.nms(result) if isinstance(det_thrs, float): det_thrs = [det_thrs] * len(result) if vis: to_file = osp.join(save_root, osp.basename(img)) to_file = add_prefix_filename_suffix(to_file, suffix='_obb') self.vis(img, result, vis_thr=vis_thr, to_file=to_file) result = [det[det[..., -1] > det_thr] for det, det_thr in zip(result, det_thrs)] if len(result) == 0: print('detect: image {} has no object.'.format(img)) self.img_detected.append(img) self.results.append(result) return result def nms(self, result, nms_th=0.3): dets_num = [len(det_cls) for det_cls in result] result =
np.vstack(result)
numpy.vstack
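The `draw_features` hook above min-max normalizes each channel map to [0, 255] before colour-mapping it for display. A minimal sketch of that normalization in isolation, using a hypothetical random feature map:

import numpy as np
import cv2 as cv

feat = np.random.randn(32, 32).astype(np.float32)  # hypothetical single-channel feature map
pmin, pmax = float(feat.min()), float(feat.max())
img = ((feat - pmin) / (pmax - pmin + 1e-6) * 255).astype(np.uint8)  # rescale to 0-255
heat = cv.applyColorMap(img, cv.COLORMAP_JET)  # BGR heat map
heat_rgb = heat[:, :, ::-1]                    # flip BGR -> RGB before plt.imshow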
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 12:10:11 2019

@author: Omer
"""
## File handler
## This file was initially intended purely to generate the matrices for the near earth code found in:
## https://public.ccsds.org/Pubs/131x1o2e2s.pdf
## The values from the above pdf were copied manually to a txt file, and it is the purpose of this file to parse it.
## The emphasis here is on correctness; I currently do not see a reason to generalise this file, since matrices will be saved in either json or some matrix-friendly format.

import numpy as np
from scipy.linalg import circulant
#import matplotlib.pyplot as plt
import scipy.io
import common
import hashlib
import os

projectDir = os.environ.get('LDPC')
if projectDir is None:  # `is None` is the idiomatic comparison for a missing environment variable
    import pathlib
    projectDir = pathlib.Path(__file__).parent.absolute()
## <NAME>: added on 01/12/2020, need to make sure this doesn't break anything.
import sys
sys.path.insert(1, projectDir)

FILE_HANDLER_INT_DATA_TYPE = np.int32
GENERAL_CODE_MATRIX_DATA_TYPE = np.int32
NIBBLE_CONVERTER = np.array([8, 4, 2, 1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)


def nibbleToHex(inputArray):
    n = NIBBLE_CONVERTER.dot(inputArray)
    if n == 10:
        h = 'A'
    elif n == 11:
        h = 'B'
    elif n == 12:
        h = 'C'
    elif n == 13:
        h = 'D'
    elif n == 14:
        h = 'E'
    elif n == 15:
        h = 'F'
    else:
        h = str(n)
    return h


def binaryArraytoHex(inputArray):
    d1 = len(inputArray)
    assert (d1 % 4 == 0)
    outputArray = np.zeros(d1//4, dtype = str)
    outputString = ''
    for j in range(d1//4):
        nibble = inputArray[4 * j : 4 * j + 4]
        h = nibbleToHex(nibble)
        outputArray[j] = h
        outputString = outputString + h
    return outputArray, outputString


def hexStringToBinaryArray(hexString):
    outputBinary = np.array([], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
    for i in hexString:
        if i == '0':
            nibble = np.array([0,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '1':
            nibble = np.array([0,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '2':
            nibble = np.array([0,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '3':
            nibble = np.array([0,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '4':
            nibble = np.array([0,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '5':
            nibble = np.array([0,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '6':
            nibble = np.array([0,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '7':
            nibble = np.array([0,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '8':
            nibble = np.array([1,0,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == '9':
            nibble = np.array([1,0,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'A':
            nibble = np.array([1,0,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'B':
            nibble = np.array([1,0,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'C':
            nibble = np.array([1,1,0,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'D':
            nibble = np.array([1,1,0,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'E':
            nibble = np.array([1,1,1,0], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
        elif i == 'F':
            nibble =
np.array([1,1,1,1], dtype = GENERAL_CODE_MATRIX_DATA_TYPE)
numpy.array
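`nibbleToHex` and `hexStringToBinaryArray` above spell out the 16 nibble cases with if/elif chains. A minimal sketch of the same round trip with a lookup table; `nibble_to_hex` and `hex_to_nibble` are illustrative names, not part of the file above:

import numpy as np

NIBBLE_CONVERTER = np.array([8, 4, 2, 1])
HEX_DIGITS = "0123456789ABCDEF"

def nibble_to_hex(nibble):
    # The dot product turns the 4-bit array into an integer 0..15.
    return HEX_DIGITS[int(NIBBLE_CONVERTER.dot(nibble))]

def hex_to_nibble(h):
    # Recover the 4 bits of the digit's value, most significant first.
    n = HEX_DIGITS.index(h)
    return np.array([(n >> 3) & 1, (n >> 2) & 1, (n >> 1) & 1, n & 1])

assert nibble_to_hex(np.array([1, 1, 1, 1])) == 'F'
assert list(hex_to_nibble('A')) == [1, 0, 1, 0]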
# # Copyright (c) 2021 The GPflux Contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import numpy as np import pytest import tensorflow as tf import tensorflow_probability as tfp from gpflow.kullback_leiblers import gauss_kl from gpflux.encoders import DirectlyParameterizedNormalDiag from gpflux.layers import LatentVariableLayer, LayerWithObservations, TrackableLayer tf.keras.backend.set_floatx("float64") ############ # Utilities ############ def _zero_one_normal_prior(w_dim): """ N(0, I) prior """ return tfp.distributions.MultivariateNormalDiag(loc=np.zeros(w_dim), scale_diag=np.ones(w_dim)) def get_distributions_with_w_dim(): distributions = [] for d in [1, 5]: mean = np.zeros(d) scale_tri_l = np.eye(d) mvn = tfp.distributions.MultivariateNormalTriL(mean, scale_tri_l) std = np.ones(d) mvn_diag = tfp.distributions.MultivariateNormalDiag(mean, std) distributions.append((mvn, d)) distributions.append((mvn_diag, d)) return distributions ############ # Tests ############ @pytest.mark.parametrize("distribution, w_dim", get_distributions_with_w_dim()) def test_local_kls(distribution, w_dim): lv = LatentVariableLayer(encoder=None, prior=distribution) # test kl is 0 when posteriors == priors posterior = distribution assert lv._local_kls(posterior) == 0 # test kl > 0 when posteriors != priors batch_size = 10 params = distribution.parameters posterior_params = { k: [v + 0.5 for _ in range(batch_size)] for k, v in params.items() if isinstance(v, np.ndarray) } posterior = lv.distribution_class(**posterior_params) local_kls = lv._local_kls(posterior) assert np.all(local_kls > 0) assert local_kls.shape == (batch_size,) @pytest.mark.parametrize("w_dim", [1, 5]) def test_local_kl_gpflow_consistency(w_dim): num_data = 400 means = np.random.randn(num_data, w_dim) encoder = DirectlyParameterizedNormalDiag(num_data, w_dim, means) lv = LatentVariableLayer(encoder=encoder, prior=_zero_one_normal_prior(w_dim)) posteriors = lv._inference_posteriors( [np.random.randn(num_data, 3), np.random.randn(num_data, 2)] ) q_mu = posteriors.parameters["loc"] q_sqrt = posteriors.parameters["scale_diag"] gpflow_local_kls = gauss_kl(q_mu, q_sqrt) tfp_local_kls = tf.reduce_sum(lv._local_kls(posteriors)) np.testing.assert_allclose(tfp_local_kls, gpflow_local_kls, rtol=1e-10) class ArrayMatcher: def __init__(self, expected): self.expected = expected def __eq__(self, actual): return np.allclose(actual, self.expected, equal_nan=True) @pytest.mark.parametrize("w_dim", [1, 5]) def test_latent_variable_layer_losses(mocker, w_dim): num_data, x_dim, y_dim = 43, 3, 1 prior_shape = (w_dim,) posteriors_shape = (num_data, w_dim) prior = tfp.distributions.MultivariateNormalDiag( loc=np.random.randn(*prior_shape), scale_diag=
np.random.randn(*prior_shape)
numpy.random.randn
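The prior above is a `MultivariateNormalDiag` whose `scale_diag` is drawn with `np.random.randn`, which can yield negative values; a minimal sketch of the construction that keeps the scales strictly positive, assuming w_dim = 5:

import numpy as np
import tensorflow_probability as tfp

w_dim = 5
prior = tfp.distributions.MultivariateNormalDiag(
    loc=np.random.randn(w_dim),
    scale_diag=np.abs(np.random.randn(w_dim)) + 1e-3,  # strictly positive scales
)
samples = prior.sample(10)  # shape (10, w_dim)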
'''
<NAME> set up : 2020-1-9
integrate img and label into one file -- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2

# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils


def getDatasets(dir):
    return os.listdir(dir)


class perturbed(utils.BasePerturbed):
    def __init__(self, path, bg_path, save_path, save_suffix):
        self.path = path
        self.bg_path = bg_path
        self.save_path = save_path
        self.save_suffix = save_suffix

    def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
        origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)

        save_img_shape = [512*2, 480*2]  # 320
        # reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
        reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
        # reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
        # reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
        base_img_shrink = save_img_shape[0] - reduce_value

        # enlarge_img_shrink = [1024, 768]
        # enlarge_img_shrink = [896, 672]  # 420
        enlarge_img_shrink = [512*4, 480*4]  # 420
        # enlarge_img_shrink = [896*2, 768*2]  # 420
        # enlarge_img_shrink = [896, 768]  # 420
        # enlarge_img_shrink = [768, 576]  # 420
        # enlarge_img_shrink = [640, 480]  # 420

        ''''''
        im_lr = origin_img.shape[0]
        im_ud = origin_img.shape[1]
        reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
        # reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
        if im_lr > im_ud:
            im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
            im_lr = save_img_shape[0] - reduce_value
        else:
            base_img_shrink = save_img_shape[1] - reduce_value
            im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
            im_ud = base_img_shrink

        if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
            repeat_time = min(repeat_time, 8)

        edge_padding = 3
        im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding)  # im_lr % (fiducial_points-1) - 1
        im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding)  # im_ud % (fiducial_points-1) - 1
        im_height = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
        im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
        # im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding)  # im_lr % (fiducial_points-1) - 1
        # im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding)  # im_ud % (fiducial_points-1) - 1
        # im_height = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
        # im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
        im_x, im_y = np.meshgrid(im_height, im_wide)

        segment_x = (im_lr) // (fiducial_points-1)
        segment_y = (im_ud) // (fiducial_points-1)

        # plt.plot(im_x, im_y,
        #          color='limegreen',
        #          marker='.',
        #          linestyle='')
        # plt.grid(True)
        # plt.show()
        self.origin_img = cv2.resize(origin_img, (im_ud, im_lr),
interpolation=cv2.INTER_CUBIC) perturbed_bg_ = getDatasets(self.bg_path) perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_) perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR) mesh_shape = self.origin_img.shape[:2] self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img) # self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img) self.new_shape = self.synthesis_perturbed_img.shape[:2] perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA) origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2) pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2) self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2)) # self.perturbed_xy_ = pixel_position.copy().astype(np.float32) # fiducial_points_grid = origin_pixel_position[im_x, im_y] self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2)) x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape) origin_pixel_position += [x_min, y_min] x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1]) x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16) y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16) x_min += x_shift x_max += x_shift y_min += y_shift y_max += y_shift '''im_x,y''' im_x += x_min im_y += y_min self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy() synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy() foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16) foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16) foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label # synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max) # synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max) '''*****************************************************************''' is_normalizationFun_mixture = self.is_perform(0.2, 0.8) # if not is_normalizationFun_mixture: normalizationFun_0_1 = False # normalizationFun_0_1 = self.is_perform(0.5, 0.5) if fold_curve == 'fold': fold_curve_random = True # is_normalizationFun_mixture = False normalizationFun_0_1 = self.is_perform(0.2, 0.8) if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99) alpha_perturbed = random.randint(80, 160) / 100 # is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99) synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256) # synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16) synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label) alpha_perturbed_change = self.is_perform(0.5, 0.5) 
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9) for repeat_i in range(repeat_time): if alpha_perturbed_change: if fold_curve == 'fold': if is_normalizationFun_mixture: alpha_perturbed = random.randint(80, 120) / 100 else: if normalizationFun_0_1 and repeat_time < 8: alpha_perturbed = random.randint(50, 70) / 100 else: alpha_perturbed = random.randint(70, 130) / 100 else: alpha_perturbed = random.randint(80, 160) / 100 '''''' linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1] linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1] linspace_x_seq = [1, 2, 3] linspace_y_seq = [1, 2, 3] r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_p = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice: linspace_x_seq.remove(r_x) linspace_y_seq.remove(r_y) r_x = random.choice(linspace_x_seq) r_y = random.choice(linspace_y_seq) perturbed_pp = np.array( [random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10), random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10 # perturbed_p, perturbed_pp = np.array( # [random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) \ # , np.array([random.randint(0, self.new_shape[0] * 10) / 10, # random.randint(0, self.new_shape[1] * 10) / 10]) # perturbed_p, perturbed_pp = np.array( # [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \ # , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10, # random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) '''''' perturbed_vp = perturbed_pp - perturbed_p perturbed_vp_norm = np.linalg.norm(perturbed_vp) perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm '''''' # perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100]) # perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100]) if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7): # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100]) # perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100]) else: # perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100]) # perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100]) perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100]) # perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100]) # perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10]) '''''' if fold_curve == 'fold': if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = 
np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: if is_normalizationFun_mixture: if self.is_perform(0.5, 0.5): perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) else: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2)) else: if normalizationFun_0_1: perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2) else: perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line)) '''''' if fold_curve_random: # omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed) # omega_perturbed = alpha_perturbed**perturbed_d omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed) else: omega_perturbed = 1 - perturbed_d ** alpha_perturbed '''shadow''' if self.is_perform(0.6, 0.4): synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255) '''''' if relativeShift_position in ['position', 'relativeShift_v2']: self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0) else: print('relativeShift_position error') exit() ''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 # synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8) synthesis_perturbed_label[:, :, 0] *= foreORbackground_label synthesis_perturbed_label[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 0] *= foreORbackground_label synthesis_perturbed_img[:, :, 1] *= foreORbackground_label synthesis_perturbed_img[:, :, 2] *= foreORbackground_label self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label ''' '''perspective''' perspective_shreshold = random.randint(26, 36)*10 # 280 x_min_per, y_min_per, x_max_per, y_max_per = 
self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold) pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]]) e_1_ = x_max_per - x_min_per e_2_ = y_max_per - y_min_per e_3_ = e_2_ e_4_ = e_1_ perspective_shreshold_h = e_1_*0.02 perspective_shreshold_w = e_2_*0.02 a_min_, a_max_ = 70, 110 # if self.is_perform(1, 0): if fold_curve == 'curve' and self.is_perform(0.5, 0.5): if self.is_perform(0.5, 0.5): while True: pts2 = np.around( np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around( np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold], [x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold], [x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold], [x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break else: while True: pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold], [x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold], [x_max_per+(random.random()-0.5)*perspective_shreshold, 
y_max_per+(random.random()-0.5)*perspective_shreshold]])) e_1 = np.linalg.norm(pts2[0]-pts2[1]) e_2 = np.linalg.norm(pts2[0]-pts2[2]) e_3 = np.linalg.norm(pts2[1]-pts2[3]) e_4 = np.linalg.norm(pts2[2]-pts2[3]) if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \ e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \ abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w: a0_, a1_, a2_, a3_ = self.get_angle_4(pts2) if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_): break M = cv2.getPerspectiveTransform(pts1, pts2) one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16) matr = np.dstack((pixel_position, one)) new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3) x = new[:, :, 0]/new[:, :, 2] y = new[:, :, 1]/new[:, :, 2] perturbed_xy_ = np.dstack((x, y)) # perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75)) # perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17))) # perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17)) # perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0) perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1) # perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16) self.perturbed_xy_ += perturbed_xy_ '''perspective end''' '''to img''' flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape( self.new_shape[0] * self.new_shape[1], 2) # self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7)) self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0) '''get fiducial points''' fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y] vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position) wts_sum = np.abs(wts).sum(-1) # flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts) wts = wts[wts_sum <= 1, :] vtx = vtx[wts_sum <= 1, :] synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts) synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts) foreORbackground_label = np.zeros(self.new_shape) foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts) foreORbackground_label[foreORbackground_label < 0.99] = 0 foreORbackground_label[foreORbackground_label >= 0.99] = 1 self.synthesis_perturbed_img = synthesis_perturbed_img self.synthesis_perturbed_label = synthesis_perturbed_label self.foreORbackground_label = foreORbackground_label '''draw fiducial points stepSize = 0 fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy() for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2): cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + 
math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1) cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img) ''' '''clip''' perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1] for x in range(self.new_shape[0] // 2, perturbed_x_max): if
np.sum(self.synthesis_perturbed_img[x, :])
numpy.sum
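The truncated loop above begins the clipping pass: it scans rows (and, later, columns) of the synthesized canvas and uses `np.sum` to find where the pixels stop being pure background. A minimal sketch of that idea on a hypothetical 8x8 white canvas; the real code uses a 256-filled float canvas and scans outward from the centre:

import numpy as np

canvas = np.full((8, 8, 3), 255, dtype=np.float32)
canvas[2:5, 3:6] = 0.0                    # hypothetical foreground block
bg_row = 255 * canvas.shape[1] * 3        # pixel sum of an all-background row

# Rows/columns whose sum differs from pure background contain foreground.
rows = [x for x in range(canvas.shape[0]) if np.sum(canvas[x, :]) != bg_row]
cols = [y for y in range(canvas.shape[1]) if np.sum(canvas[:, y]) != bg_row]
x_min, x_max = rows[0], rows[-1] + 1
y_min, y_max = cols[0], cols[-1] + 1
cropped = canvas[x_min:x_max, y_min:y_max]
assert cropped.shape == (3, 3, 3)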