Dataset columns: repo_name, path, copies, size, content, license.
repo_name: kmike/scikit-learn
path: sklearn/utils/__init__.py
copies: 3
size: 10094
""" The :mod:`sklearn.utils` module includes various utilites. """ from collections import Sequence import numpy as np from scipy.sparse import issparse import warnings from .murmurhash import murmurhash3_32 from .validation import (as_float_array, check_arrays, safe_asarray, assert_all_finite, array2d, atleast2d_or_csc, atleast2d_or_csr, warn_if_not_float, check_random_state) from .class_weight import compute_class_weight __all__ = ["murmurhash3_32", "as_float_array", "check_arrays", "safe_asarray", "assert_all_finite", "array2d", "atleast2d_or_csc", "atleast2d_or_csr", "warn_if_not_float", "check_random_state", "compute_class_weight"] # Make sure that DeprecationWarning get printed warnings.simplefilter("always", DeprecationWarning) class deprecated(object): """Decorator to mark a function or class as deprecated. Issue a warning when the function is called/the class is instantiated and adds a warning to the docstring. The optional extra argument will be appended to the deprecation message and the docstring. Note: to use this with the default value for extra, put in an empty of parentheses: >>> from sklearn.utils import deprecated >>> deprecated() # doctest: +ELLIPSIS <sklearn.utils.deprecated object at ...> >>> @deprecated() ... def some_function(): pass """ # Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary, # but with many changes. def __init__(self, extra=''): """ Parameters ---------- extra: string to be added to the deprecation messages """ self.extra = extra def __call__(self, obj): if isinstance(obj, type): return self._decorate_class(obj) else: return self._decorate_fun(obj) def _decorate_class(self, cls): msg = "Class %s is deprecated" % cls.__name__ if self.extra: msg += "; %s" % self.extra # FIXME: we should probably reset __new__ for full generality init = cls.__init__ def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return init(*args, **kwargs) cls.__init__ = wrapped wrapped.__name__ = '__init__' wrapped.__doc__ = self._update_doc(init.__doc__) wrapped.deprecated_original = init return cls def _decorate_fun(self, fun): """Decorate function fun""" msg = "Function %s is deprecated" % fun.__name__ if self.extra: msg += "; %s" % self.extra def wrapped(*args, **kwargs): warnings.warn(msg, category=DeprecationWarning) return fun(*args, **kwargs) wrapped.__name__ = fun.__name__ wrapped.__dict__ = fun.__dict__ wrapped.__doc__ = self._update_doc(fun.__doc__) return wrapped def _update_doc(self, olddoc): newdoc = "DEPRECATED" if self.extra: newdoc = "%s: %s" % (newdoc, self.extra) if olddoc: newdoc = "%s\n\n%s" % (newdoc, olddoc) return newdoc def safe_mask(X, mask): """Return a mask which is safe to use on X. Parameters ---------- X : {array-like, sparse matrix} Data on which to apply mask. mask: array Mask to be used on X. Returns ------- mask """ mask = np.asanyarray(mask) if np.issubdtype(mask.dtype, np.int): return mask if hasattr(X, "toarray"): ind = np.arange(mask.shape[0]) mask = ind[mask] return mask def resample(*arrays, **options): """Resample arrays or sparse matrices in a consistent way The default strategy implements one step of the bootstrapping procedure. Parameters ---------- `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0] replace : boolean, True by default Implements resampling with replacement. If False, this will implement (sliced) random permutations. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. 
random_state : int or RandomState instance Control the shuffling for reproducible behavior. Returns ------- Sequence of resampled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = [[1., 0.], [2., 1.], [0., 0.]] >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import resample >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0) >>> X array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 4 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 1., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([0, 1, 0]) >>> resample(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :class:`sklearn.cross_validation.Bootstrap` :func:`sklearn.utils.shuffle` """ random_state = check_random_state(options.pop('random_state', None)) replace = options.pop('replace', True) max_n_samples = options.pop('n_samples', None) if options: raise ValueError("Unexpected kw arguments: %r" % options.keys()) if len(arrays) == 0: return None first = arrays[0] n_samples = first.shape[0] if hasattr(first, 'shape') else len(first) if max_n_samples is None: max_n_samples = n_samples if max_n_samples > n_samples: raise ValueError("Cannot sample %d out of arrays with dim %d" % ( max_n_samples, n_samples)) arrays = check_arrays(*arrays, sparse_format='csr') if replace: indices = random_state.randint(0, n_samples, size=(max_n_samples,)) else: indices = np.arange(n_samples) random_state.shuffle(indices) indices = indices[:max_n_samples] resampled_arrays = [] for array in arrays: array = array[indices] resampled_arrays.append(array) if len(resampled_arrays) == 1: # syntactic sugar for the unit argument case return resampled_arrays[0] else: return resampled_arrays def shuffle(*arrays, **options): """Shuffle arrays or sparse matrices in a consistent way This is a convenience alias to ``resample(*arrays, replace=False)`` to do random permutations of the collections. Parameters ---------- `*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0] random_state : int or RandomState instance Control the shuffling for reproducible behavior. n_samples : int, None by default Number of samples to generate. If left to None this is automatically set to the first dimension of the arrays. Returns ------- Sequence of shuffled views of the collections. The original arrays are not impacted. Examples -------- It is possible to mix sparse and dense arrays in the same run:: >>> X = [[1., 0.], [2., 1.], [0., 0.]] >>> y = np.array([0, 1, 2]) >>> from scipy.sparse import coo_matrix >>> X_sparse = coo_matrix(X) >>> from sklearn.utils import shuffle >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0) >>> X array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE <3x2 sparse matrix of type '<... 'numpy.float64'>' with 3 stored elements in Compressed Sparse Row format> >>> X_sparse.toarray() array([[ 0., 0.], [ 2., 1.], [ 1., 0.]]) >>> y array([2, 1, 0]) >>> shuffle(y, n_samples=2, random_state=0) array([0, 1]) See also -------- :func:`sklearn.utils.resample` """ options['replace'] = False return resample(*arrays, **options) def safe_sqr(X, copy=True): """Element wise squaring of array-likes and sparse matrices. 
Parameters ---------- X : array like, matrix, sparse matrix Returns ------- X ** 2 : element wise square """ X = safe_asarray(X) if issparse(X): if copy: X = X.copy() X.data **= 2 else: if copy: X = X ** 2 else: X **= 2 return X def gen_even_slices(n, n_packs): """Generator to create n_packs slices going up to n. Examples -------- >>> from sklearn.utils import gen_even_slices >>> list(gen_even_slices(10, 1)) [slice(0, 10, None)] >>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS [slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)] >>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS [slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)] >>> list(gen_even_slices(10, 3)) [slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)] """ start = 0 for pack_num in range(n_packs): this_n = n // n_packs if pack_num < n % n_packs: this_n += 1 if this_n > 0: end = start + this_n yield slice(start, end, None) start = end def tosequence(x): """Cast iterable x to a Sequence, avoiding a copy if possible.""" if isinstance(x, np.ndarray): return np.asarray(x) elif isinstance(x, Sequence): return x else: return list(x) class ConvergenceWarning(Warning): "Custom warning to capture convergence problems"
license: bsd-3-clause
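A minimal usage sketch for safe_mask, which the docstring above describes but does not demonstrate (shapes and values here are illustrative only): boolean masks cannot index scipy.sparse matrices directly, so safe_mask converts the mask to integer indices when X is sparse and leaves it unchanged when X is dense.

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils import safe_mask

X_dense = np.arange(10).reshape(5, 2)
X_sparse = csr_matrix(X_dense)
mask = np.array([True, False, True, False, True])

rows_dense = X_dense[safe_mask(X_dense, mask)]     # boolean mask, unchanged
rows_sparse = X_sparse[safe_mask(X_sparse, mask)]  # integer indices [0, 2, 4]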
repo_name: mne-tools/mne-tools.github.io
path: 0.20/_downloads/76822bb92a8465181ec2a7ee96ca8cf4/plot_decoding_csp_timefreq.py
copies: 1
size: 6457
""" ============================================================================ Decoding in time-frequency space data using the Common Spatial Pattern (CSP) ============================================================================ The time-frequency decomposition is estimated by iterating over raw data that has been band-passed at different frequencies. This is used to compute a covariance matrix over each epoch or a rolling time-window and extract the CSP filtered signals. A linear discriminant classifier is then applied to these signals. """ # Authors: Laura Gwilliams <laura.gwilliams@nyu.edu> # Jean-Remi King <jeanremi.king@gmail.com> # Alex Barachant <alexandre.barachant@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from mne import Epochs, create_info, events_from_annotations from mne.io import concatenate_raws, read_raw_edf from mne.datasets import eegbci from mne.decoding import CSP from mne.time_frequency import AverageTFR from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import StratifiedKFold, cross_val_score from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder ############################################################################### # Set parameters and read data event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet subject = 1 runs = [6, 10, 14] raw_fnames = eegbci.load_data(subject, runs) raw = concatenate_raws([read_raw_edf(f, preload=True) for f in raw_fnames]) # Extract information from the raw file sfreq = raw.info['sfreq'] events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3)) raw.pick_types(meg=False, eeg=True, stim=False, eog=False, exclude='bads') # Assemble the classifier using scikit-learn pipeline clf = make_pipeline(CSP(n_components=4, reg=None, log=True, norm_trace=False), LinearDiscriminantAnalysis()) n_splits = 5 # how many folds to use for cross-validation cv = StratifiedKFold(n_splits=n_splits, shuffle=True) # Classification & Time-frequency parameters tmin, tmax = -.200, 2.000 n_cycles = 10. # how many complete cycles: used to define window size min_freq = 5. max_freq = 25. n_freqs = 8 # how many frequency bins to use # Assemble list of frequency range tuples freqs = np.linspace(min_freq, max_freq, n_freqs) # assemble frequencies freq_ranges = list(zip(freqs[:-1], freqs[1:])) # make freqs list of tuples # Infer window spacing from the max freq and number of cycles to avoid gaps window_spacing = (n_cycles / np.max(freqs) / 2.) centered_w_times = np.arange(tmin, tmax, window_spacing)[1:] n_windows = len(centered_w_times) # Instantiate label encoder le = LabelEncoder() ############################################################################### # Loop through frequencies, apply classifier and save scores # init scores freq_scores = np.zeros((n_freqs - 1,)) # Loop through each frequency range of interest for freq, (fmin, fmax) in enumerate(freq_ranges): # Infer window size based on the frequency being used w_size = n_cycles / ((fmax + fmin) / 2.) 
# in seconds # Apply band-pass filter to isolate the specified frequencies raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin', skip_by_annotation='edge') # Extract epochs from filtered data, padded by window size epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size, proj=False, baseline=None, preload=True) epochs.drop_bad() y = le.fit_transform(epochs.events[:, 2]) X = epochs.get_data() # Save mean scores over folds for each frequency and time window freq_scores[freq] = np.mean(cross_val_score(estimator=clf, X=X, y=y, scoring='roc_auc', cv=cv, n_jobs=1), axis=0) ############################################################################### # Plot frequency results plt.bar(freqs[:-1], freq_scores, width=np.diff(freqs)[0], align='edge', edgecolor='black') plt.xticks(freqs) plt.ylim([0, 1]) plt.axhline(len(epochs['feet']) / len(epochs), color='k', linestyle='--', label='chance level') plt.legend() plt.xlabel('Frequency (Hz)') plt.ylabel('Decoding Scores') plt.title('Frequency Decoding Scores') ############################################################################### # Loop through frequencies and time, apply classifier and save scores # init scores tf_scores = np.zeros((n_freqs - 1, n_windows)) # Loop through each frequency range of interest for freq, (fmin, fmax) in enumerate(freq_ranges): # Infer window size based on the frequency being used w_size = n_cycles / ((fmax + fmin) / 2.) # in seconds # Apply band-pass filter to isolate the specified frequencies raw_filter = raw.copy().filter(fmin, fmax, n_jobs=1, fir_design='firwin', skip_by_annotation='edge') # Extract epochs from filtered data, padded by window size epochs = Epochs(raw_filter, events, event_id, tmin - w_size, tmax + w_size, proj=False, baseline=None, preload=True) epochs.drop_bad() y = le.fit_transform(epochs.events[:, 2]) # Roll covariance, csp and lda over time for t, w_time in enumerate(centered_w_times): # Center the min and max of the window w_tmin = w_time - w_size / 2. w_tmax = w_time + w_size / 2. # Crop data into time-window of interest X = epochs.copy().crop(w_tmin, w_tmax).get_data() # Save mean scores over folds for each frequency and time window tf_scores[freq, t] = np.mean(cross_val_score(estimator=clf, X=X, y=y, scoring='roc_auc', cv=cv, n_jobs=1), axis=0) ############################################################################### # Plot time-frequency results # Set up time frequency object av_tfr = AverageTFR(create_info(['freq'], sfreq), tf_scores[np.newaxis, :], centered_w_times, freqs[1:], 1) chance = np.mean(y) # set chance level to white in the plot av_tfr.plot([0], vmin=chance, title="Time-Frequency Decoding Scores", cmap=plt.cm.Reds)
license: bsd-3-clause
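A quick numeric check of the windowing arithmetic in the script above, using the same parameter values (this snippet is a sketch, not part of the original example):

import numpy as np

n_cycles, min_freq, max_freq, n_freqs = 10., 5., 25., 8
freqs = np.linspace(min_freq, max_freq, n_freqs)  # 5.0, 7.86, ..., 25.0
freq_ranges = list(zip(freqs[:-1], freqs[1:]))    # 7 frequency bands

# w_size = n_cycles / band center: ~1.56 s for the lowest band (5.0-7.86 Hz),
# ~0.42 s for the highest band (22.14-25.0 Hz).
for fmin, fmax in freq_ranges:
    w_size = n_cycles / ((fmax + fmin) / 2.)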
repo_name: bijanfallah/OI_CCLM
path: src/RMSE_MAPS_INGO.py
copies: 1
size: 2007
# Program to show the maps of RMSE averaged over time
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import os
from netCDF4 import Dataset as NetCDFFile
import numpy as np
from CCLM_OUTS import Plot_CCLM

# option == 1 -> shift 4 with default cclm domain and nboundlines = 3
# option == 2 -> shift 4 with smaller cclm domain and nboundlines = 3
# option == 3 -> shift 4 with smaller cclm domain and nboundlines = 6
# option == 4 -> shift 4 with corrected smaller cclm domain and nboundlines = 3
# option == 5 -> shift 4 with corrected smaller cclm domain and nboundlines = 4
# option == 6 -> shift 4 with corrected smaller cclm domain and nboundlines = 6
# option == 7 -> shift 4 with corrected smaller cclm domain and nboundlines = 9
# option == 8 -> shift 4 with corrected bigger cclm domain and nboundlines = 3

# def f(x):
#     if x == -9999:
#         return float('NaN')
#     else:
#         return x


def read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',
                           name='member_T_2M_ts_seasmean.nc', var='T_2M'):
    # type: (object, object, object) -> object
    # a function to read the data from mistral work
    """
    :rtype: object
    """
    # CMD = 'scp $mistral:' + dir + name + ' ./'
    CMD = 'wget users.met.fu-berlin.de/~BijanFallah/' + dir + name
    os.system(CMD)
    nc = NetCDFFile(name)
    # for name2, variable in nc.variables.items():
    #     for attrname in variable.ncattrs():
    #         print(name2, variable, '-----------------', attrname)
    #         # print("{} -- {}".format(attrname, getattr(variable, attrname)))
    os.remove(name)
    lats = nc.variables['lat'][:]
    lons = nc.variables['lon'][:]
    t = nc.variables[var][:].squeeze()
    rlats = nc.variables['rlat'][:]  # extract/copy the data
    rlons = nc.variables['rlon'][:]
    # f2 = np.vectorize(f)
    # t = f2(t)
    # t = t.data
    t = t.squeeze()
    nc.close()
    return (t, lats, lons, rlats, rlons)
license: mit
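A hedged sketch of how read_data_from_mistral might feed the RMSE map the program's title announces; the second file name is hypothetical and only illustrates the intended comparison of two fields over the time axis (np and plt come from the imports above):

t_ref, lats, lons, rlats, rlons = read_data_from_mistral()
t_mem, _, _, _, _ = read_data_from_mistral(name='member2_T_2M_ts_seasmean.nc')

rmse_map = np.sqrt(((t_mem - t_ref) ** 2).mean(axis=0))  # averaged over time
plt.pcolormesh(rlons, rlats, rmse_map)
plt.colorbar(label='RMSE of T_2M')
plt.show()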
repo_name: lancezlin/ml_template_py
path: lib/python2.7/site-packages/sklearn/metrics/tests/test_score_objects.py
copies: 15
size: 17443
import pickle
import tempfile
import shutil
import os
import numbers

import numpy as np

from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message

from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
                             log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
                                    _passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib


REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
                      'neg_mean_squared_error', 'neg_median_absolute_error',
                      'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']

CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'neg_log_loss', 'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]

MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']


def _make_estimators(X_train, y_train, y_ml_train):
    # Make estimators that make sense to test various scoring methods
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    return dict(
        [(name, sensible_regr) for name in REGRESSION_SCORERS] +
        [(name, sensible_clf) for name in CLF_SCORERS] +
        [(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
    )


X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None


def setup_module():
    # Create some memory mapped data
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
    X, y = make_classification(n_samples=30, n_features=5, random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
    joblib.dump((X, y, y_ml), filename)
    X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
    ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)


def teardown_module():
    global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
    # GC closes the mmap file descriptors
    X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
    shutil.rmtree(TEMP_FOLDER)


class EstimatorWithoutFit(object):
    """Dummy estimator to test check_scoring"""
    pass


class EstimatorWithFit(BaseEstimator):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self


class EstimatorWithFitAndScore(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        return self

    def score(self, X, y):
        return 1.0


class EstimatorWithFitAndPredict(object):
    """Dummy estimator to test check_scoring"""
    def fit(self, X, y):
        self.y = y
        return self

    def predict(self, X):
        return self.y


class DummyScorer(object):
    """Dummy scorer that always returns 1."""
    def __call__(self, est, X, y):
        return 1


def test_all_scorers_repr():
    # Test that all scorers have a working repr
    for name, scorer in SCORERS.items():
        repr(scorer)


def test_check_scoring():
    # Test all branches of check_scoring
    estimator = EstimatorWithoutFit()
    pattern = (r"estimator should be an estimator implementing 'fit' method,"
               r" .* was passed")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)

    estimator = EstimatorWithFitAndScore()
    estimator.fit([[1]], [1])
    scorer = check_scoring(estimator)
    assert_true(scorer is _passthrough_scorer)
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    estimator = EstimatorWithFitAndPredict()
    estimator.fit([[1]], [1])
    pattern = (r"If no scoring is specified, the estimator passed should have"
               r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, pattern, check_scoring, estimator)

    scorer = check_scoring(estimator, "accuracy")
    assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)

    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, "accuracy")
    assert_true(isinstance(scorer, _PredictScorer))

    estimator = EstimatorWithFit()
    scorer = check_scoring(estimator, allow_none=True)
    assert_true(scorer is None)


def test_check_scoring_gridsearchcv():
    # test that check_scoring works on GridSearchCV and pipeline.
    # slightly redundant non-regression test.

    grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    scorer = check_scoring(grid, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    pipe = make_pipeline(LinearSVC())
    scorer = check_scoring(pipe, "f1")
    assert_true(isinstance(scorer, _PredictScorer))

    # check that cross_val_score definitely calls the scorer
    # and doesn't make any assumptions about the estimator apart from having a
    # fit.
    scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
                             scoring=DummyScorer())
    assert_array_equal(scores, 1)


def test_make_scorer():
    # Sanity check on the make_scorer factory function.
    f = lambda *args: 0
    assert_raises(ValueError, make_scorer, f, needs_threshold=True,
                  needs_proba=True)


def test_classification_scores():
    # Test classification scorers.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:

        score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='weighted')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='macro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                        average='micro')
        assert_almost_equal(score1, score2)

        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)

    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)

    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)

    # smoke test the repr:
    repr(fbeta_score)


def test_regression_scorers():
    # Test regression scorers.
    diabetes = load_diabetes()
    X, y = diabetes.data, diabetes.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = Ridge()
    clf.fit(X_train, y_train)
    score1 = get_scorer('r2')(clf, X_test, y_test)
    score2 = r2_score(y_test, clf.predict(X_test))
    assert_almost_equal(score1, score2)


def test_thresholded_scorers():
    # Test scorers that take thresholds.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)

    logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)

    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)

    # test with a regressor (no decision_function)
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)

    # Test that an exception is raised on more than two classes
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)


def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorers work with multilabel-indicator format
    # for multilabel and multi-output multi-class classifier
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]

    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)


def test_unsupervised_scorers():
    # Test clustering scorers against gold standard labeling.
    # We don't have any real unsupervised Scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    score2 = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(score1, score2)


@ignore_warnings
def test_raises_on_score_list():
    # Test that when a list of scores is returned, we raise proper errors.
    X, y = make_blobs(random_state=0)
    f1_scorer_no_average = make_scorer(f1_score, average=None)
    clf = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, clf, X, y,
                  scoring=f1_scorer_no_average)
    grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
                               param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, grid_search.fit, X, y)


@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors

    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    estimator = _make_estimators(X_train, y_train, y_ml_train)

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))

        except TypeError as e:
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))


@ignore_warnings  # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
    scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
    if scorer_name in MULTILABEL_ONLY_SCORERS:
        score = scorer(estimator, X_mm, y_ml_mm)
    else:
        score = scorer(estimator, X_mm, y_mm)
    assert isinstance(score, numbers.Number), scorer_name


def test_scorer_memmap_input():
    # Non-regression test for #6147: some score functions would
    # return singleton memmap when computed on memmap data instead of scalar
    # float values.
    for name in SCORERS.keys():
        yield check_scorer_memmap, name


def test_deprecated_names():
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)

    for name in ('mean_absolute_error', 'mean_squared_error',
                 'median_absolute_error', 'log_loss'):
        warning_msg = "Scoring method %s was renamed to" % name
        for scorer in (get_scorer(name), SCORERS[name]):
            assert_warns_message(DeprecationWarning,
                                 warning_msg, scorer,
                                 clf, X, y)

        assert_warns_message(DeprecationWarning,
                             warning_msg, cross_val_score,
                             clf, X, y, scoring=name)


def test_scoring_is_not_metric():
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), f1_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         LogisticRegression(), roc_auc_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         Ridge(), r2_score)
    assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
                         KMeans(), adjusted_rand_score)
license: mit
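The tests above revolve around the scorer API; a minimal standalone sketch of the two lookup paths they exercise (same sklearn version as the tests assumed):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score, get_scorer, make_scorer

X, y = make_classification(random_state=0)
clf = LogisticRegression(random_state=0).fit(X, y)

acc = get_scorer('accuracy')(clf, X, y)           # named scorer lookup
f2 = make_scorer(fbeta_score, beta=2)(clf, X, y)  # metric wrapped with kwargs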
repo_name: ChanChiChoi/scikit-learn
path: examples/model_selection/plot_roc.py
copies: 146
size: 3697
""" ======================================= Receiver Operating Characteristic (ROC) ======================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. ROC curves are typically used in binary classification to study the output of a classifier. In order to extend ROC curve and ROC area to multi-class or multi-label classification, it is necessary to binarize the output. One ROC curve can be drawn per label, but one can also draw a ROC curve by considering each element of the label indicator matrix as a binary prediction (micro-averaging). .. note:: See also :func:`sklearn.metrics.roc_auc_score`, :ref:`example_model_selection_plot_roc_crossval.py`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.cross_validation import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) # Plot of a ROC curve for a specific class plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() # Plot ROC curve plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"])) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show()
license: bsd-3-clause
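Not shown in the example above: a macro-averaged ROC area follows directly from the per-class areas already computed, and should agree with roc_auc_score(y_test, y_score, average='macro') on the same binarized labels and scores.

macro_auc = np.mean([roc_auc[i] for i in range(n_classes)])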
repo_name: belltailjp/scikit-learn
path: sklearn/decomposition/base.py
copies: 313
size: 5647
"""Principal Component Analysis Base Classes""" # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis A. Engemann <d.engemann@fz-juelich.de> # Kyle Kastner <kastnerkyle@gmail.com> # # License: BSD 3 clause import numpy as np from scipy import linalg from ..base import BaseEstimator, TransformerMixin from ..utils import check_array from ..utils.extmath import fast_dot from ..utils.validation import check_is_fitted from ..externals import six from abc import ABCMeta, abstractmethod class _BasePCA(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class for PCA methods. Warning: This class should not be used directly. Use derived classes instead. """ def get_covariance(self): """Compute data covariance with the generative model. ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)`` where S**2 contains the explained variances, and sigma2 contains the noise variances. Returns ------- cov : array, shape=(n_features, n_features) Estimated covariance of data. """ components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) cov = np.dot(components_.T * exp_var_diff, components_) cov.flat[::len(cov) + 1] += self.noise_variance_ # modify diag inplace return cov def get_precision(self): """Compute data precision matrix with the generative model. Equals the inverse of the covariance but computed with the matrix inversion lemma for efficiency. Returns ------- precision : array, shape=(n_features, n_features) Estimated precision of data. """ n_features = self.components_.shape[1] # handle corner cases first if self.n_components_ == 0: return np.eye(n_features) / self.noise_variance_ if self.n_components_ == n_features: return linalg.inv(self.get_covariance()) # Get precision using matrix inversion lemma components_ = self.components_ exp_var = self.explained_variance_ if self.whiten: components_ = components_ * np.sqrt(exp_var[:, np.newaxis]) exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.) precision = np.dot(components_, components_.T) / self.noise_variance_ precision.flat[::len(precision) + 1] += 1. / exp_var_diff precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_)) precision /= -(self.noise_variance_ ** 2) precision.flat[::len(precision) + 1] += 1. / self.noise_variance_ return precision @abstractmethod def fit(X, y=None): """Placeholder for fit. Subclasses should implement this method! Fit the model with X. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data, where n_samples is the number of samples and n_features is the number of features. Returns ------- self : object Returns the instance itself. """ def transform(self, X, y=None): """Apply dimensionality reduction to X. X is projected on the first principal components previously extracted from a training set. Parameters ---------- X : array-like, shape (n_samples, n_features) New data, where n_samples is the number of samples and n_features is the number of features. 
Returns ------- X_new : array-like, shape (n_samples, n_components) Examples -------- >>> import numpy as np >>> from sklearn.decomposition import IncrementalPCA >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]]) >>> ipca = IncrementalPCA(n_components=2, batch_size=3) >>> ipca.fit(X) IncrementalPCA(batch_size=3, copy=True, n_components=2, whiten=False) >>> ipca.transform(X) # doctest: +SKIP """ check_is_fitted(self, ['mean_', 'components_'], all_or_any=all) X = check_array(X) if self.mean_ is not None: X = X - self.mean_ X_transformed = fast_dot(X, self.components_.T) if self.whiten: X_transformed /= np.sqrt(self.explained_variance_) return X_transformed def inverse_transform(self, X, y=None): """Transform data back to its original space. In other words, return an input X_original whose transform would be X. Parameters ---------- X : array-like, shape (n_samples, n_components) New data, where n_samples is the number of samples and n_components is the number of components. Returns ------- X_original array-like, shape (n_samples, n_features) Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening. """ if self.whiten: return fast_dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_) + self.mean_ else: return fast_dot(X, self.components_) + self.mean_
license: bsd-3-clause
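A small sanity-check sketch of the matrix-inversion-lemma shortcut in get_precision above: on a fitted PCA (a subclass of this base class) it matches a direct inverse of get_covariance.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)
pca = PCA(n_components=2).fit(X)

direct = np.linalg.inv(pca.get_covariance())  # brute-force inverse
lemma = pca.get_precision()                   # inversion-lemma shortcut
print(np.allclose(direct, lemma))             # expected: True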
repo_name: tosolveit/scikit-learn
path: sklearn/ensemble/tests/test_partial_dependence.py
copies: 365
size: 6996
""" Testing for the partial dependence module. """ import numpy as np from numpy.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import if_matplotlib from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the boston dataset boston = datasets.load_boston() # also load the iris dataset iris = datasets.load_iris() def test_partial_dependence_classifier(): # Test partial dependence for classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5) # only 4 grid points instead of 5 because only 4 unique X[:,0] vals assert pdp.shape == (1, 4) assert axes[0].shape[0] == 4 # now with our own grid X_ = np.asarray(X) grid = np.unique(X_[:, 0]) pdp_2, axes = partial_dependence(clf, [0], grid=grid) assert axes is None assert_array_equal(pdp, pdp_2) def test_partial_dependence_multiclass(): # Test partial dependence for multi-class classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 n_classes = clf.n_classes_ pdp, axes = partial_dependence( clf, [0], X=iris.data, grid_resolution=grid_resolution) assert pdp.shape == (n_classes, grid_resolution) assert len(axes) == 1 assert axes[0].shape[0] == grid_resolution def test_partial_dependence_regressor(): # Test partial dependence for regressor clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 pdp, axes = partial_dependence( clf, [0], X=boston.data, grid_resolution=grid_resolution) assert pdp.shape == (1, grid_resolution) assert axes[0].shape[0] == grid_resolution def test_partial_dependecy_input(): # Test input validation of partial dependence. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None) assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, partial_dependence, {}, [0], X=X) # Gradient boosting estimator must be fit assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), [0], X=X) assert_raises(ValueError, partial_dependence, clf, [-1], X=X) assert_raises(ValueError, partial_dependence, clf, [100], X=X) # wrong ndim for grid grid = np.random.rand(10, 2, 1) assert_raises(ValueError, partial_dependence, clf, [0], grid=grid) @if_matplotlib def test_plot_partial_dependence(): # Test partial dependence plot function. 
clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with str features and array feature names fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with list feature_names feature_names = boston.feature_names.tolist() fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) @if_matplotlib def test_plot_partial_dependence_input(): # Test partial dependence plot function input checks. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) # not fitted yet assert_raises(ValueError, plot_partial_dependence, clf, X, [0]) clf.fit(X, y) assert_raises(ValueError, plot_partial_dependence, clf, np.array(X)[:, :0], [0]) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, plot_partial_dependence, {}, X, [0]) # must be larger than -1 assert_raises(ValueError, plot_partial_dependence, clf, X, [-1]) # too large feature value assert_raises(ValueError, plot_partial_dependence, clf, X, [100]) # str feature but no feature_names assert_raises(ValueError, plot_partial_dependence, clf, X, ['foobar']) # not valid features value assert_raises(ValueError, plot_partial_dependence, clf, X, [{'foo': 'bar'}]) @if_matplotlib def test_plot_partial_dependence_multiclass(): # Test partial dependence plot function on multi-class input. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0, grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # now with symbol labels target = iris.target_names[iris.target] clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label='setosa', grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # label not in gbrt.classes_ assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], label='foobar', grid_resolution=grid_resolution) # label not provided assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], grid_resolution=grid_resolution)
license: bsd-3-clause
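For context, a sketch of the call pattern these tests exercise, including the two-way interaction form passed as a (feature, feature) tuple (same sklearn era assumed, where partial dependence lives under sklearn.ensemble):

from sklearn.datasets import load_boston
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence

boston = load_boston()
est = GradientBoostingRegressor(n_estimators=10, random_state=1)
est.fit(boston.data, boston.target)

# one axis per entry: two single-feature curves and one 2-D interaction plot
fig, axs = plot_partial_dependence(est, boston.data, [0, 5, (0, 5)],
                                   feature_names=boston.feature_names)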
repo_name: jblackburne/scikit-learn
path: sklearn/neural_network/rbm.py
copies: 46
size: 12291
"""Restricted Boltzmann Machine """ # Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca> # Vlad Niculae # Gabriel Synnaeve # Lars Buitinck # License: BSD 3 clause import time import numpy as np import scipy.sparse as sp from ..base import BaseEstimator from ..base import TransformerMixin from ..externals.six.moves import xrange from ..utils import check_array from ..utils import check_random_state from ..utils import gen_even_slices from ..utils import issparse from ..utils.extmath import safe_sparse_dot from ..utils.extmath import log_logistic from ..utils.fixes import expit # logistic function from ..utils.validation import check_is_fitted class BernoulliRBM(BaseEstimator, TransformerMixin): """Bernoulli Restricted Boltzmann Machine (RBM). A Restricted Boltzmann Machine with binary visible units and binary hidden units. Parameters are estimated using Stochastic Maximum Likelihood (SML), also known as Persistent Contrastive Divergence (PCD) [2]. The time complexity of this implementation is ``O(d ** 2)`` assuming d ~ n_features ~ n_components. Read more in the :ref:`User Guide <rbm>`. Parameters ---------- n_components : int, optional Number of binary hidden units. learning_rate : float, optional The learning rate for weight updates. It is *highly* recommended to tune this hyper-parameter. Reasonable values are in the 10**[0., -3.] range. batch_size : int, optional Number of examples per minibatch. n_iter : int, optional Number of iterations/sweeps over the training dataset to perform during training. verbose : int, optional The verbosity level. The default, zero, means silent mode. random_state : integer or numpy.RandomState, optional A random number generator instance to define the state of the random permutations generator. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- intercept_hidden_ : array-like, shape (n_components,) Biases of the hidden units. intercept_visible_ : array-like, shape (n_features,) Biases of the visible units. components_ : array-like, shape (n_components, n_features) Weight matrix, where n_features in the number of visible units and n_components is the number of hidden units. Examples -------- >>> import numpy as np >>> from sklearn.neural_network import BernoulliRBM >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]]) >>> model = BernoulliRBM(n_components=2) >>> model.fit(X) BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10, random_state=None, verbose=0) References ---------- [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for deep belief nets. Neural Computation 18, pp 1527-1554. http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf [2] Tieleman, T. Training Restricted Boltzmann Machines using Approximations to the Likelihood Gradient. International Conference on Machine Learning (ICML) 2008 """ def __init__(self, n_components=256, learning_rate=0.1, batch_size=10, n_iter=10, verbose=0, random_state=None): self.n_components = n_components self.learning_rate = learning_rate self.batch_size = batch_size self.n_iter = n_iter self.verbose = verbose self.random_state = random_state def transform(self, X): """Compute the hidden layer activation probabilities, P(h=1|v=X). Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) The data to be transformed. Returns ------- h : array, shape (n_samples, n_components) Latent representations of the data. 
""" check_is_fitted(self, "components_") X = check_array(X, accept_sparse='csr', dtype=np.float64) return self._mean_hiddens(X) def _mean_hiddens(self, v): """Computes the probabilities P(h=1|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- h : array-like, shape (n_samples, n_components) Corresponding mean field values for the hidden layer. """ p = safe_sparse_dot(v, self.components_.T) p += self.intercept_hidden_ return expit(p, out=p) def _sample_hiddens(self, v, rng): """Sample from the distribution P(h|v). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to sample from. rng : RandomState Random number generator to use. Returns ------- h : array-like, shape (n_samples, n_components) Values of the hidden layer. """ p = self._mean_hiddens(v) return (rng.random_sample(size=p.shape) < p) def _sample_visibles(self, h, rng): """Sample from the distribution P(v|h). Parameters ---------- h : array-like, shape (n_samples, n_components) Values of the hidden layer to sample from. rng : RandomState Random number generator to use. Returns ------- v : array-like, shape (n_samples, n_features) Values of the visible layer. """ p = np.dot(h, self.components_) p += self.intercept_visible_ expit(p, out=p) return (rng.random_sample(size=p.shape) < p) def _free_energy(self, v): """Computes the free energy F(v) = - log sum_h exp(-E(v,h)). Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer. Returns ------- free_energy : array-like, shape (n_samples,) The value of the free energy. """ return (- safe_sparse_dot(v, self.intercept_visible_) - np.logaddexp(0, safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_).sum(axis=1)) def gibbs(self, v): """Perform one Gibbs sampling step. Parameters ---------- v : array-like, shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : array-like, shape (n_samples, n_features) Values of the visible layer after one Gibbs step. """ check_is_fitted(self, "components_") if not hasattr(self, "random_state_"): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_ def partial_fit(self, X, y=None): """Fit the model to the data X which should contain a partial segment of the data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float64) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) if not hasattr(self, 'components_'): self.components_ = np.asarray( self.random_state_.normal( 0, 0.01, (self.n_components, X.shape[1]) ), order='F') if not hasattr(self, 'intercept_hidden_'): self.intercept_hidden_ = np.zeros(self.n_components, ) if not hasattr(self, 'intercept_visible_'): self.intercept_visible_ = np.zeros(X.shape[1], ) if not hasattr(self, 'h_samples_'): self.h_samples_ = np.zeros((self.batch_size, self.n_components)) self._fit(X, self.random_state_) def _fit(self, v_pos, rng): """Inner fit for one mini-batch. Adjust the parameters to maximize the likelihood of v using Stochastic Maximum Likelihood (SML). Parameters ---------- v_pos : array-like, shape (n_samples, n_features) The data to use for training. rng : RandomState Random number generator to use for sampling. 
""" h_pos = self._mean_hiddens(v_pos) v_neg = self._sample_visibles(self.h_samples_, rng) h_neg = self._mean_hiddens(v_neg) lr = float(self.learning_rate) / v_pos.shape[0] update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T update -= np.dot(h_neg.T, v_neg) self.components_ += lr * update self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0)) self.intercept_visible_ += lr * (np.asarray( v_pos.sum(axis=0)).squeeze() - v_neg.sum(axis=0)) h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial self.h_samples_ = np.floor(h_neg, h_neg) def score_samples(self, X): """Compute the pseudo-likelihood of X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Values of the visible layer. Must be all-boolean (not checked). Returns ------- pseudo_likelihood : array-like, shape (n_samples,) Value of the pseudo-likelihood (proxy for likelihood). Notes ----- This method is not deterministic: it computes a quantity called the free energy on X, then on a randomly corrupted version of X, and returns the log of the logistic function of the difference. """ check_is_fitted(self, "components_") v = check_array(X, accept_sparse='csr') rng = check_random_state(self.random_state) # Randomly corrupt one feature in each sample in v. ind = (np.arange(v.shape[0]), rng.randint(0, v.shape[1], v.shape[0])) if issparse(v): data = -2 * v[ind] + 1 v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape) else: v_ = v.copy() v_[ind] = 1 - v_[ind] fe = self._free_energy(v) fe_ = self._free_energy(v_) return v.shape[1] * log_logistic(fe_ - fe) def fit(self, X, y=None): """Fit the model to the data X. Parameters ---------- X : {array-like, sparse matrix} shape (n_samples, n_features) Training data. Returns ------- self : BernoulliRBM The fitted model. """ X = check_array(X, accept_sparse='csr', dtype=np.float64) n_samples = X.shape[0] rng = check_random_state(self.random_state) self.components_ = np.asarray( rng.normal(0, 0.01, (self.n_components, X.shape[1])), order='F') self.intercept_hidden_ = np.zeros(self.n_components, ) self.intercept_visible_ = np.zeros(X.shape[1], ) self.h_samples_ = np.zeros((self.batch_size, self.n_components)) n_batches = int(np.ceil(float(n_samples) / self.batch_size)) batch_slices = list(gen_even_slices(n_batches * self.batch_size, n_batches, n_samples)) verbose = self.verbose begin = time.time() for iteration in xrange(1, self.n_iter + 1): for batch_slice in batch_slices: self._fit(X[batch_slice], rng) if verbose: end = time.time() print("[%s] Iteration %d, pseudo-likelihood = %.2f," " time = %.2fs" % (type(self).__name__, iteration, self.score_samples(X).mean(), end - begin)) begin = end return self
license: bsd-3-clause
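A short usage sketch of the sampling side of the BernoulliRBM API above on synthetic binary data (shapes and values illustrative): transform returns P(h=1|v), while gibbs performs one v -> h -> v sampling step.

import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
V = (rng.rand(50, 16) > 0.5).astype(np.float64)
rbm = BernoulliRBM(n_components=8, n_iter=5, random_state=0).fit(V)

H = rbm.transform(V)   # hidden activation probabilities, shape (50, 8)
V_new = rbm.gibbs(V)   # boolean visible sample after one Gibbs step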
repo_name: jorik041/scikit-learn
path: sklearn/linear_model/randomized_l1.py
copies: 95
size: 23365
""" Randomized Lasso/Logistic: feature selection based on Lasso and sparse Logistic Regression """ # Author: Gael Varoquaux, Alexandre Gramfort # # License: BSD 3 clause import itertools from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy.sparse import issparse from scipy import sparse from scipy.interpolate import interp1d from .base import center_data from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..externals.joblib import Memory, Parallel, delayed from ..utils import (as_float_array, check_random_state, check_X_y, check_array, safe_mask, ConvergenceWarning) from ..utils.validation import check_is_fitted from .least_angle import lars_path, LassoLarsIC from .logistic import LogisticRegression ############################################################################### # Randomized linear model: feature selection def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200, n_jobs=1, verbose=False, pre_dispatch='3*n_jobs', random_state=None, sample_fraction=.75, **params): random_state = check_random_state(random_state) # We are generating 1 - weights, and not weights n_samples, n_features = X.shape if not (0 < scaling < 1): raise ValueError( "'scaling' should be between 0 and 1. Got %r instead." % scaling) scaling = 1. - scaling scores_ = 0.0 for active_set in Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)( delayed(estimator_func)( X, y, weights=scaling * random_state.random_integers( 0, 1, size=(n_features,)), mask=(random_state.rand(n_samples) < sample_fraction), verbose=max(0, verbose - 1), **params) for _ in range(n_resampling)): scores_ += active_set scores_ /= n_resampling return scores_ class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator, TransformerMixin)): """Base class to implement randomized linear models for feature selection This implements the strategy by Meinshausen and Buhlman: stability selection with randomized sampling, and random re-weighting of the penalty. """ @abstractmethod def __init__(self): pass _center_data = staticmethod(center_data) def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, sparse matrix shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. Returns ------- self : object Returns an instance of self. 
""" X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True) X = as_float_array(X, copy=False) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = self._center_data(X, y, self.fit_intercept, self.normalize) estimator_func, params = self._make_estimator_and_params(X, y) memory = self.memory if isinstance(memory, six.string_types): memory = Memory(cachedir=memory) scores_ = memory.cache( _resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch'] )( estimator_func, X, y, scaling=self.scaling, n_resampling=self.n_resampling, n_jobs=self.n_jobs, verbose=self.verbose, pre_dispatch=self.pre_dispatch, random_state=self.random_state, sample_fraction=self.sample_fraction, **params) if scores_.ndim == 1: scores_ = scores_[:, np.newaxis] self.all_scores_ = scores_ self.scores_ = np.max(self.all_scores_, axis=1) return self def _make_estimator_and_params(self, X, y): """Return the parameters passed to the estimator""" raise NotImplementedError def get_support(self, indices=False): """Return a mask, or list, of the features/indices selected.""" check_is_fitted(self, 'scores_') mask = self.scores_ > self.selection_threshold return mask if not indices else np.where(mask)[0] # XXX: the two function below are copy/pasted from feature_selection, # Should we add an intermediate base class? def transform(self, X): """Transform a new matrix using the selected features""" mask = self.get_support() X = check_array(X) if len(mask) != X.shape[1]: raise ValueError("X has a different shape than during fitting.") return check_array(X)[:, safe_mask(X, mask)] def inverse_transform(self, X): """Transform a new matrix using the selected features""" support = self.get_support() if X.ndim == 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size)) Xt[:, support] = X return Xt ############################################################################### # Randomized lasso: regression settings def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False, precompute=False, eps=np.finfo(np.float).eps, max_iter=500): X = X[safe_mask(X, mask)] y = y[mask] # Center X and y to avoid fit the intercept X -= X.mean(axis=0) y -= y.mean() alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float)) X = (1 - weights) * X with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas_, _, coef_ = lars_path(X, y, Gram=precompute, copy_X=False, copy_Gram=False, alpha_min=np.min(alpha), method='lasso', verbose=verbose, max_iter=max_iter, eps=eps) if len(alpha) > 1: if len(alphas_) > 1: # np.min(alpha) < alpha_min interpolator = interp1d(alphas_[::-1], coef_[:, ::-1], bounds_error=False, fill_value=0.) scores = (interpolator(alpha) != 0.0) else: scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool) else: scores = coef_[:, -1] != 0.0 return scores class RandomizedLasso(BaseRandomizedLinearModel): """Randomized Lasso. Randomized Lasso works by resampling the train data and computing a Lasso on each resampling. In short, the features selected more often are good features. It is also known as stability selection. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- alpha : float, 'aic', or 'bic', optional The regularization parameter alpha parameter in the Lasso. Warning: this is not the alpha parameter in the stability selection article which is scaling. scaling : float, optional The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. 
sample_fraction : float, optional The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional Number of randomized models. selection_threshold: float, optional The score above which features should be selected. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default True If True, the regressors X will be normalized before regression. precompute : True | False | 'auto' Whether to use a precomputed Gram matrix to speed up calculations. If set to 'auto' let us decide. The Gram matrix can also be passed as argument. max_iter : integer, optional Maximum number of iterations to perform in the Lars algorithm. eps : float, optional The machine-precision regularization in the computation of the Cholesky diagonal factors. Increase this for very ill-conditioned systems. Unlike the 'tol' parameter in some iterative optimization-based algorithms, this parameter does not control the tolerance of the optimization. n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max of \ ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLasso >>> randomized_lasso = RandomizedLasso() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLogisticRegression, LogisticRegression """ def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, fit_intercept=True, verbose=False, normalize=True, precompute='auto', max_iter=500, eps=np.finfo(np.float).eps, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.alpha = alpha self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.max_iter = max_iter self.verbose = verbose self.normalize = normalize self.precompute = precompute self.eps = eps self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): assert self.precompute in (True, False, None, 'auto') alpha = self.alpha if alpha in ('aic', 'bic'): model = LassoLarsIC(precompute=self.precompute, criterion=self.alpha, max_iter=self.max_iter, eps=self.eps) model.fit(X, y) self.alpha_ = alpha = model.alpha_ return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter, eps=self.eps, precompute=self.precompute) ############################################################################### # Randomized logistic: classification settings def _randomized_logistic(X, y, weights, mask, C=1., verbose=False, fit_intercept=True, tol=1e-3): X = X[safe_mask(X, mask)] y = y[mask] if issparse(X): size = len(weights) weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size)) X = X * weight_dia else: X *= (1 - weights) C = np.atleast_1d(np.asarray(C, dtype=np.float)) scores = np.zeros((X.shape[1], len(C)), dtype=np.bool) for this_C, this_scores in zip(C, scores.T): # XXX : would be great to do it with a warm_start ... clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False, fit_intercept=fit_intercept) clf.fit(X, y) this_scores[:] = np.any( np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0) return scores class RandomizedLogisticRegression(BaseRandomizedLinearModel): """Randomized Logistic Regression Randomized Regression works by resampling the train data and computing a LogisticRegression on each resampling. In short, the features selected more often are good features. It is also known as stability selection. Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- C : float, optional, default=1 The regularization parameter C in the LogisticRegression. scaling : float, optional, default=0.5 The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. n_resampling : int, optional, default=200 Number of randomized models. selection_threshold : float, optional, default=0.25 The score above which features should be selected. fit_intercept : boolean, optional, default=True whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). 
verbose : boolean or integer, optional Sets the verbosity amount normalize : boolean, optional, default=True If True, the regressors X will be normalized before regression. tol : float, optional, default=1e-3 tolerance for stopping criteria of LogisticRegression n_jobs : integer, optional Number of CPUs to use during the resampling. If '-1', use all the CPUs random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. pre_dispatch : int, or string, optional Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A string, giving an expression as a function of n_jobs, as in '2*n_jobs' memory : Instance of joblib.Memory or string Used for internal caching. By default, no caching is done. If a string is given, it is the path to the caching directory. Attributes ---------- scores_ : array, shape = [n_features] Feature scores between 0 and 1. all_scores_ : array, shape = [n_features, n_reg_parameter] Feature scores between 0 and 1 for all values of the regularization \ parameter. The reference article suggests ``scores_`` is the max \ of ``all_scores_``. Examples -------- >>> from sklearn.linear_model import RandomizedLogisticRegression >>> randomized_logistic = RandomizedLogisticRegression() Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. 
References ---------- Stability selection Nicolai Meinshausen, Peter Buhlmann Journal of the Royal Statistical Society: Series B Volume 72, Issue 4, pages 417-473, September 2010 DOI: 10.1111/j.1467-9868.2010.00740.x See also -------- RandomizedLasso, Lasso, ElasticNet """ def __init__(self, C=1, scaling=.5, sample_fraction=.75, n_resampling=200, selection_threshold=.25, tol=1e-3, fit_intercept=True, verbose=False, normalize=True, random_state=None, n_jobs=1, pre_dispatch='3*n_jobs', memory=Memory(cachedir=None, verbose=0)): self.C = C self.scaling = scaling self.sample_fraction = sample_fraction self.n_resampling = n_resampling self.fit_intercept = fit_intercept self.verbose = verbose self.normalize = normalize self.tol = tol self.random_state = random_state self.n_jobs = n_jobs self.selection_threshold = selection_threshold self.pre_dispatch = pre_dispatch self.memory = memory def _make_estimator_and_params(self, X, y): params = dict(C=self.C, tol=self.tol, fit_intercept=self.fit_intercept) return _randomized_logistic, params def _center_data(self, X, y, fit_intercept, normalize=False): """Center the data in X but not in y""" X, _, Xmean, _, X_std = center_data(X, y, fit_intercept, normalize=normalize) return X, y, Xmean, y, X_std ############################################################################### # Stability paths def _lasso_stability_path(X, y, mask, weights, eps): "Inner loop of lasso_stability_path" X = X * weights[np.newaxis, :] X = X[safe_mask(X, mask), :] y = y[mask] alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0] alpha_min = eps * alpha_max # set for early stopping in path with warnings.catch_warnings(): warnings.simplefilter('ignore', ConvergenceWarning) alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False, alpha_min=alpha_min) # Scale alpha by alpha_max alphas /= alphas[0] # Sort alphas in ascending order alphas = alphas[::-1] coefs = coefs[:, ::-1] # Get rid of the alphas that are too small mask = alphas >= eps # We also want to keep the first one: it should be close to the OLS # solution mask[0] = True alphas = alphas[mask] coefs = coefs[:, mask] return alphas, coefs def lasso_stability_path(X, y, scaling=0.5, random_state=None, n_resampling=200, n_grid=100, sample_fraction=0.75, eps=4 * np.finfo(np.float).eps, n_jobs=1, verbose=False): """Stability path based on randomized Lasso estimates Read more in the :ref:`User Guide <randomized_l1>`. Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data. y : array-like, shape = [n_samples] Target values. scaling : float, optional, default=0.5 The alpha parameter in the stability selection article used to randomly scale the features. Should be between 0 and 1. random_state : integer or numpy.random.RandomState, optional The generator used to randomize the design. n_resampling : int, optional, default=200 Number of randomized models. n_grid : int, optional, default=100 Number of grid points. The path is linearly reinterpolated on a grid between 0 and 1 before computing the scores. sample_fraction : float, optional, default=0.75 The fraction of samples to be used in each randomized design. Should be between 0 and 1. If 1, all samples are used. eps : float, optional Smallest value of alpha / alpha_max considered n_jobs : integer, optional Number of CPUs to use during the resampling.
If '-1', use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Returns ------- alphas_grid : array, shape ~ [n_grid] The grid points between 0 and 1: alpha/alpha_max scores_path : array, shape = [n_features, n_grid] The scores for each feature along the path. Notes ----- See examples/linear_model/plot_sparse_recovery.py for an example. """ rng = check_random_state(random_state) if not (0 < scaling < 1): raise ValueError("Parameter 'scaling' should be between 0 and 1." " Got %r instead." % scaling) n_samples, n_features = X.shape paths = Parallel(n_jobs=n_jobs, verbose=verbose)( delayed(_lasso_stability_path)( X, y, mask=rng.rand(n_samples) < sample_fraction, weights=1. - scaling * rng.random_integers(0, 1, size=(n_features,)), eps=eps) for k in range(n_resampling)) all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths])))) # Take approximately n_grid values stride = int(max(1, int(len(all_alphas) / float(n_grid)))) all_alphas = all_alphas[::stride] if not all_alphas[-1] == 1: all_alphas.append(1.) all_alphas = np.array(all_alphas) scores_path = np.zeros((n_features, len(all_alphas))) for alphas, coefs in paths: if alphas[0] != 0: alphas = np.r_[0, alphas] coefs = np.c_[np.ones((n_features, 1)), coefs] if alphas[-1] != all_alphas[-1]: alphas = np.r_[alphas, all_alphas[-1]] coefs = np.c_[coefs, np.zeros((n_features, 1))] scores_path += (interp1d(alphas, coefs, kind='nearest', bounds_error=False, fill_value=0, axis=-1)(all_alphas) != 0) scores_path /= n_resampling return all_alphas, scores_path
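A short, hedged usage sketch of the stability-selection API defined in this file (assuming it is importable as sklearn.linear_model.RandomizedLasso; the data and parameter values are illustrative only):

import numpy as np
from sklearn.linear_model import RandomizedLasso

rng = np.random.RandomState(42)
X = rng.randn(60, 10)
y = X[:, 0] + 0.5 * X[:, 1] + 0.1 * rng.randn(60)  # features 0 and 1 are informative

rlasso = RandomizedLasso(alpha=0.025, n_resampling=100, random_state=42)
rlasso.fit(X, y)
print(rlasso.scores_)        # per-feature selection frequency in [0, 1]
print(rlasso.get_support())  # mask of features above selection_threshold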
bsd-3-clause
jingxiang-li/kaggle-yelp
model/level3_model_rf.py
1
5669
from __future__ import division from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import numpy as np from sklearn.ensemble import RandomForestClassifier from sklearn.calibration import CalibratedClassifierCV from sklearn.metrics import f1_score import argparse from os import path import os from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from utils import * import pickle np.random.seed(54568464) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('--yix', type=int, default=0) return parser.parse_args() # functions for hyperparameters optimization class Score: def __init__(self, X, y): self.y = y self.X = X def get_score(self, params): params['n_estimators'] = int(params['n_estimators']) params['max_depth'] = int(params['max_depth']) params['min_samples_split'] = int(params['min_samples_split']) params['min_samples_leaf'] = int(params['min_samples_leaf']) print('Training with params:') print(params) # cross validation here scores = [] for train_ix, test_ix in makeKFold(5, self.y, 1): X_train, y_train = self.X[train_ix, :], self.y[train_ix] X_test, y_test = self.X[test_ix, :], self.y[test_ix] weight = y_train.shape[0] / (2 * np.bincount(y_train)) sample_weight = np.array([weight[i] for i in y_train]) clf = RandomForestClassifier(**params) cclf = CalibratedClassifierCV(base_estimator=clf, method='isotonic', cv=makeKFold(3, y_train, 1)) cclf.fit(X_train, y_train, sample_weight) pred = cclf.predict(X_test) scores.append(f1_score(y_true=y_test, y_pred=pred)) print(scores) score = np.mean(scores) print(score) return {'loss': -score, 'status': STATUS_OK} def optimize(trials, X, y, max_evals): space = { 'n_estimators': hp.quniform('n_estimators', 100, 500, 50), 'criterion': hp.choice('criterion', ['gini', 'entropy']), 'max_depth': hp.quniform('max_depth', 1, 7, 1), 'min_samples_split': hp.quniform('min_samples_split', 1, 9, 2), 'min_samples_leaf': hp.quniform('min_samples_leaf', 1, 5, 1), 'bootstrap': True, 'oob_score': True, 'n_jobs': -1 } s = Score(X, y) best = fmin(s.get_score, space, algo=tpe.suggest, trials=trials, max_evals=max_evals) best['n_estimators'] = int(best['n_estimators']) best['max_depth'] = int(best['max_depth']) best['min_samples_split'] = int(best['min_samples_split']) best['min_samples_leaf'] = int(best['min_samples_leaf']) best['criterion'] = ['gini', 'entropy'][best['criterion']] best['bootstrap'] = True best['oob_score'] = True best['n_jobs'] = -1 del s return best def out_fold_pred(params, X, y): # cross validation here preds = np.zeros((y.shape[0])) for train_ix, test_ix in makeKFold(5, y, 1): X_train, y_train = X[train_ix, :], y[train_ix] X_test = X[test_ix, :] weight = y_train.shape[0] / (2 * np.bincount(y_train)) sample_weight = np.array([weight[i] for i in y_train]) clf = RandomForestClassifier(**params) cclf = CalibratedClassifierCV(base_estimator=clf, method='isotonic', cv=makeKFold(3, y_train, 1)) cclf.fit(X_train, y_train, sample_weight) pred = cclf.predict_proba(X_test)[:, 1] preds[test_ix] = pred return preds def get_model(params, X, y): clf = RandomForestClassifier(**params) cclf = CalibratedClassifierCV(base_estimator=clf, method='isotonic', cv=makeKFold(3, y, 1)) weight = y.shape[0] / (2 * np.bincount(y)) sample_weight = np.array([weight[i] for i in y]) cclf.fit(X, y, sample_weight) return cclf args = parse_args() data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy')) X_test = np.load(path.join(data_dir, 'X_test.npy')) y_train = np.load(path.join(data_dir, 'y_train.npy')) print(X_train.shape, X_test.shape, y_train.shape) X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy') X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy') print(X_train_ext.shape, X_test_ext.shape) X_train = np.hstack((X_train, X_train_ext)) X_test = np.hstack((X_test, X_test_ext)) print('Add Extra') print(X_train.shape, X_test.shape, y_train.shape) # Now we have X_train, X_test, y_train trials = Trials() params = optimize(trials, X_train, y_train, 50) out_fold = out_fold_pred(params, X_train, y_train) clf = get_model(params, X_train, y_train) preds = clf.predict_proba(X_test)[:, 1] save_dir = '../level3-model-final/' + str(args.yix) print(save_dir) if not path.exists(save_dir): os.makedirs(save_dir) # save model, parameter, outFold_pred, pred with open(path.join(save_dir, 'model_rf.pkl'), 'wb') as f_model: pickle.dump(clf.calibrated_classifiers_, f_model) with open(path.join(save_dir, 'param_rf.pkl'), 'wb') as f_param: pickle.dump(params, f_param) np.save(path.join(save_dir, 'pred_rf.npy'), preds) np.save(path.join(save_dir, 'outFold_rf.npy'), out_fold)
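The class-balancing heuristic this script applies before every fit (weight = n_samples / (2 * class counts)) can be checked in isolation; a small sketch with made-up binary labels:

from __future__ import division  # same true division the script relies on
import numpy as np

y = np.array([0, 0, 0, 1])                  # toy labels
weight = y.shape[0] / (2 * np.bincount(y))  # per-class weights: [0.667, 2.0]
sample_weight = weight[y]                   # vectorized form of the list comprehension above
print(sample_weight)                        # [0.667 0.667 0.667 2.0]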
mit
JeanKossaifi/scikit-learn
sklearn/tree/tests/test_tree.py
48
47506
""" Testing for the tree module (sklearn.tree). """ import pickle from functools import partial from itertools import product import platform import numpy as np from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from sklearn.random_projection import sparse_random_matrix from sklearn.metrics import accuracy_score from sklearn.metrics import mean_squared_error from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_true from sklearn.utils.testing import raises from sklearn.utils.validation import check_random_state from sklearn.utils.validation import NotFittedError from sklearn.utils.testing import ignore_warnings from sklearn.tree import DecisionTreeClassifier from sklearn.tree import DecisionTreeRegressor from sklearn.tree import ExtraTreeClassifier from sklearn.tree import ExtraTreeRegressor from sklearn import tree from sklearn.tree.tree import SPARSE_SPLITTERS from sklearn.tree._tree import TREE_LEAF from sklearn import datasets from sklearn.preprocessing._weights import _balance_weights CLF_CRITERIONS = ("gini", "entropy") REG_CRITERIONS = ("mse", ) CLF_TREES = { "DecisionTreeClassifier": DecisionTreeClassifier, "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier, splitter="presort-best"), "ExtraTreeClassifier": ExtraTreeClassifier, } REG_TREES = { "DecisionTreeRegressor": DecisionTreeRegressor, "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor, splitter="presort-best"), "ExtraTreeRegressor": ExtraTreeRegressor, } ALL_TREES = dict() ALL_TREES.update(CLF_TREES) ALL_TREES.update(REG_TREES) SPARSE_TREES = [name for name, Tree in ALL_TREES.items() if Tree().splitter in SPARSE_SPLITTERS] X_small = np.array([ [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ], [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ], [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ], [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ], [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ], [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ], [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ], [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ], [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ], [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ], [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ], [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ], [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ], [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ], [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ], [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ], [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ], [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ], [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]]) y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1, 0.0, 1.2, 2, 11, 0, 
0, 4.5, 0.201, 1.06, 0.9, 0] # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the iris dataset # and randomly permute it iris = datasets.load_iris() rng = np.random.RandomState(1) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # also load the boston dataset # and randomly permute it boston = datasets.load_boston() perm = rng.permutation(boston.target.size) boston.data = boston.data[perm] boston.target = boston.target[perm] digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] random_state = check_random_state(0) X_multilabel, y_multilabel = datasets.make_multilabel_classification( random_state=0, n_samples=30, n_features=10) X_sparse_pos = random_state.uniform(size=(20, 5)) X_sparse_pos[X_sparse_pos <= 0.8] = 0. y_random = random_state.randint(0, 4, size=(20, )) X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0) DATASETS = { "iris": {"X": iris.data, "y": iris.target}, "boston": {"X": boston.data, "y": boston.target}, "digits": {"X": digits.data, "y": digits.target}, "toy": {"X": X, "y": y}, "clf_small": {"X": X_small, "y": y_small}, "reg_small": {"X": X_small, "y": y_small_reg}, "multilabel": {"X": X_multilabel, "y": y_multilabel}, "sparse-pos": {"X": X_sparse_pos, "y": y_random}, "sparse-neg": {"X": - X_sparse_pos, "y": y_random}, "sparse-mix": {"X": X_sparse_mix, "y": y_random}, "zeros": {"X": np.zeros((20, 3)), "y": y_random} } for name in DATASETS: DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"]) def assert_tree_equal(d, s, message): assert_equal(s.node_count, d.node_count, "{0}: unequal number of nodes ({1} != {2})" "".format(message, s.node_count, d.node_count)) assert_array_equal(d.children_right, s.children_right, message + ": unequal children_right") assert_array_equal(d.children_left, s.children_left, message + ": unequal children_left") external = d.children_right == TREE_LEAF internal = np.logical_not(external) assert_array_equal(d.feature[internal], s.feature[internal], message + ": unequal features") assert_array_equal(d.threshold[internal], s.threshold[internal], message + ": unequal threshold") assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(), message + ": unequal sum(n_node_samples)") assert_array_equal(d.n_node_samples, s.n_node_samples, message + ": unequal n_node_samples") assert_almost_equal(d.impurity, s.impurity, err_msg=message + ": unequal impurity") assert_array_almost_equal(d.value[external], s.value[external], err_msg=message + ": unequal value") def test_classification_toy(): # Check classification on a toy dataset. for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf = Tree(max_features=1, random_state=1) clf.fit(X, y) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_weighted_classification_toy(): # Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y, sample_weight=np.ones(len(X))) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5) assert_array_equal(clf.predict(T), true_result, "Failed with {0}".format(name)) def test_regression_toy(): # Check regression on a toy dataset. for name, Tree in REG_TREES.items(): reg = Tree(random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) reg = Tree(max_features=1, random_state=1) reg.fit(X, y) assert_almost_equal(reg.predict(T), true_result, err_msg="Failed with {0}".format(name)) def test_xor(): # Check on a XOR problem y = np.zeros((10, 10)) y[:5, :5] = 1 y[5:, 5:] = 1 gridx, gridy = np.indices(y.shape) X = np.vstack([gridx.ravel(), gridy.ravel()]).T y = y.ravel() for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) clf = Tree(random_state=0, max_features=1) clf.fit(X, y) assert_equal(clf.score(X, y), 1.0, "Failed with {0}".format(name)) def test_iris(): # Check consistency on dataset iris. for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS): clf = Tree(criterion=criterion, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.9, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) clf = Tree(criterion=criterion, max_features=2, random_state=0) clf.fit(iris.data, iris.target) score = accuracy_score(clf.predict(iris.data), iris.target) assert_greater(score, 0.5, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_boston(): # Check consistency on dataset boston house prices. for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS): reg = Tree(criterion=criterion, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 1, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) # using fewer features reduces the learning ability of this tree, # but reduces training time. reg = Tree(criterion=criterion, max_features=6, random_state=0) reg.fit(boston.data, boston.target) score = mean_squared_error(boston.target, reg.predict(boston.data)) assert_less(score, 2, "Failed with {0}, criterion = {1} and score = {2}" "".format(name, criterion, score)) def test_probability(): # Predict probabilities using DecisionTreeClassifier. for name, Tree in CLF_TREES.items(): clf = Tree(max_depth=1, max_features=1, random_state=42) clf.fit(iris.data, iris.target) prob_predict = clf.predict_proba(iris.data) assert_array_almost_equal(np.sum(prob_predict, 1), np.ones(iris.data.shape[0]), err_msg="Failed with {0}".format(name)) assert_array_equal(np.argmax(prob_predict, 1), clf.predict(iris.data), err_msg="Failed with {0}".format(name)) assert_almost_equal(clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data)), 8, err_msg="Failed with {0}".format(name)) def test_arrayrepr(): # Check the array representation. # Check resize X = np.arange(10000)[:, np.newaxis] y = np.arange(10000) for name, Tree in REG_TREES.items(): reg = Tree(max_depth=None, random_state=0) reg.fit(X, y) def test_pure_set(): # Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [1, 1, 1, 1, 1, 1] for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_array_equal(clf.predict(X), y, err_msg="Failed with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(X, y) assert_almost_equal(reg.predict(X), y, err_msg="Failed with {0}".format(name)) def test_numerical_stability(): # Check numerical stability. X = np.array([ [152.08097839, 140.40744019, 129.75102234, 159.90493774], [142.50700378, 135.81935120, 117.82884979, 162.75781250], [127.28772736, 140.40744019, 129.75102234, 159.90493774], [132.37025452, 143.71923828, 138.35694885, 157.84558105], [103.10237122, 143.71928406, 138.35696411, 157.84559631], [127.71276855, 143.71923828, 138.35694885, 157.84558105], [120.91514587, 140.40744019, 129.75102234, 159.90493774]]) y = np.array( [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521]) with np.errstate(all="raise"): for name, Tree in REG_TREES.items(): reg = Tree(random_state=0) reg.fit(X, y) reg.fit(X, -y) reg.fit(-X, y) reg.fit(-X, -y) def test_importances(): # Check variable importances. X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) for name, Tree in CLF_TREES.items(): clf = Tree(random_state=0) clf.fit(X, y) importances = clf.feature_importances_ n_important = np.sum(importances > 0.1) assert_equal(importances.shape[0], 10, "Failed with {0}".format(name)) assert_equal(n_important, 3, "Failed with {0}".format(name)) X_new = clf.transform(X, threshold="mean") assert_less(0, X_new.shape[1], "Failed with {0}".format(name)) assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name)) # Check on iris that importances are the same for all builders clf = DecisionTreeClassifier(random_state=0) clf.fit(iris.data, iris.target) clf2 = DecisionTreeClassifier(random_state=0, max_leaf_nodes=len(iris.data)) clf2.fit(iris.data, iris.target) assert_array_equal(clf.feature_importances_, clf2.feature_importances_) @raises(ValueError) def test_importances_raises(): # Check if variable importance before fit raises ValueError. clf = DecisionTreeClassifier() clf.feature_importances_ def test_importances_gini_equal_mse(): # Check that gini is equivalent to mse for binary output variable X, y = datasets.make_classification(n_samples=2000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, shuffle=False, random_state=0) # The gini index and the mean square error (variance) might differ due # to numerical instability. Since those instabilities mainly occur at # high tree depth, we restrict this maximal depth. clf = DecisionTreeClassifier(criterion="gini", max_depth=5, random_state=0).fit(X, y) reg = DecisionTreeRegressor(criterion="mse", max_depth=5, random_state=0).fit(X, y) assert_almost_equal(clf.feature_importances_, reg.feature_importances_) assert_array_equal(clf.tree_.feature, reg.tree_.feature) assert_array_equal(clf.tree_.children_left, reg.tree_.children_left) assert_array_equal(clf.tree_.children_right, reg.tree_.children_right) assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples) def test_max_features(): # Check max_features.
for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(max_features="auto") reg.fit(boston.data, boston.target) assert_equal(reg.max_features_, boston.data.shape[1]) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(max_features="auto") clf.fit(iris.data, iris.target) assert_equal(clf.max_features_, 2) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_features="sqrt") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.sqrt(iris.data.shape[1]))) est = TreeEstimator(max_features="log2") est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(np.log2(iris.data.shape[1]))) est = TreeEstimator(max_features=1) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=3) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 3) est = TreeEstimator(max_features=0.01) est.fit(iris.data, iris.target) assert_equal(est.max_features_, 1) est = TreeEstimator(max_features=0.5) est.fit(iris.data, iris.target) assert_equal(est.max_features_, int(0.5 * iris.data.shape[1])) est = TreeEstimator(max_features=1.0) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) est = TreeEstimator(max_features=None) est.fit(iris.data, iris.target) assert_equal(est.max_features_, iris.data.shape[1]) # use values of max_features that are invalid est = TreeEstimator(max_features=10) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=-1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=0.0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features=1.5) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_features="foobar") assert_raises(ValueError, est.fit, X, y) def test_error(): # Test that it gives proper exception on deficient input. for name, TreeEstimator in CLF_TREES.items(): # predict before fit est = TreeEstimator() assert_raises(NotFittedError, est.predict_proba, X) est.fit(X, y) X2 = [[-2, -1, 1]] # wrong feature shape for sample assert_raises(ValueError, est.predict_proba, X2) for name, TreeEstimator in ALL_TREES.items(): # Invalid values for parameters assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(min_weight_fraction_leaf=0.51).fit, X, y) assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y) assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y) # Wrong dimensions est = TreeEstimator() y2 = y[:-1] assert_raises(ValueError, est.fit, X, y2) # Test with arrays that are non-contiguous. 
Xf = np.asfortranarray(X) est = TreeEstimator() est.fit(Xf, y) assert_almost_equal(est.predict(T), true_result) # predict before fitting est = TreeEstimator() assert_raises(NotFittedError, est.predict, T) # predict on vector with different dims est.fit(X, y) t = np.asarray(T) assert_raises(ValueError, est.predict, t[:, 1:]) # wrong sample shape Xt = np.array(X).T est = TreeEstimator() est.fit(np.dot(X, Xt), y) assert_raises(ValueError, est.predict, X) assert_raises(ValueError, est.apply, X) clf = TreeEstimator() clf.fit(X, y) assert_raises(ValueError, clf.predict, Xt) assert_raises(ValueError, clf.apply, Xt) # apply before fitting est = TreeEstimator() assert_raises(NotFittedError, est.apply, T) def test_min_samples_leaf(): # Test if leaves contain more than leaf_count training examples X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE)) y = iris.target # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes in (None, 1000): for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(min_samples_leaf=5, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y) out = est.tree_.apply(X) node_counts = np.bincount(out) # drop inner nodes leaf_count = node_counts[node_counts != 0] assert_greater(np.min(leaf_count), 4, "Failed with {0}".format(name)) def check_min_weight_fraction_leaf(name, datasets, sparse=False): """Test if leaves contain at least min_weight_fraction_leaf of the training set""" if sparse: X = DATASETS[datasets]["X_sparse"].astype(np.float32) else: X = DATASETS[datasets]["X"].astype(np.float32) y = DATASETS[datasets]["y"] weights = rng.rand(X.shape[0]) total_weight = np.sum(weights) TreeEstimator = ALL_TREES[name] # test both DepthFirstTreeBuilder and BestFirstTreeBuilder # by setting max_leaf_nodes for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)): est = TreeEstimator(min_weight_fraction_leaf=frac, max_leaf_nodes=max_leaf_nodes, random_state=0) est.fit(X, y, sample_weight=weights) if sparse: out = est.tree_.apply(X.tocsr()) else: out = est.tree_.apply(X) node_weights = np.bincount(out, weights=weights) # drop inner nodes leaf_weights = node_weights[node_weights != 0] assert_greater_equal( np.min(leaf_weights), total_weight * est.min_weight_fraction_leaf, "Failed with {0} " "min_weight_fraction_leaf={1}".format( name, est.min_weight_fraction_leaf)) def test_min_weight_fraction_leaf(): # Check on dense input for name in ALL_TREES: yield check_min_weight_fraction_leaf, name, "iris" # Check on sparse input for name in SPARSE_TREES: yield check_min_weight_fraction_leaf, name, "multilabel", True def test_pickle(): # Check that tree estimators are picklable for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(iris.data, iris.target) score = clf.score(iris.data, iris.target) serialized_object = pickle.dumps(clf) clf2 = pickle.loads(serialized_object) assert_equal(type(clf2), clf.__class__) score2 = clf2.score(iris.data, iris.target) assert_equal(score, score2, "Failed to generate same score " "after pickling (classification) " "with {0}".format(name)) for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) reg.fit(boston.data, boston.target) score = reg.score(boston.data, boston.target) serialized_object = pickle.dumps(reg) reg2 = pickle.loads(serialized_object) assert_equal(type(reg2), reg.__class__) score2 = reg2.score(boston.data, boston.target) assert_equal(score, score2, "Failed to generate same score " "after pickling
(regression) " "with {0}".format(name)) def test_multioutput(): # Check estimators on multi-output problems. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1], [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]] y = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2], [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]] T = [[-1, -1], [1, 1], [-1, 1], [1, -1]] y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]] # toy classification problem for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) y_hat = clf.fit(X, y).predict(T) assert_array_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) proba = clf.predict_proba(T) assert_equal(len(proba), 2) assert_equal(proba[0].shape, (4, 2)) assert_equal(proba[1].shape, (4, 4)) log_proba = clf.predict_log_proba(T) assert_equal(len(log_proba), 2) assert_equal(log_proba[0].shape, (4, 2)) assert_equal(log_proba[1].shape, (4, 4)) # toy regression problem for name, TreeRegressor in REG_TREES.items(): reg = TreeRegressor(random_state=0) y_hat = reg.fit(X, y).predict(T) assert_almost_equal(y_hat, y_true) assert_equal(y_hat.shape, (4, 2)) def test_classes_shape(): # Test that n_classes_ and classes_ have proper shape. for name, TreeClassifier in CLF_TREES.items(): # Classification, single output clf = TreeClassifier(random_state=0) clf.fit(X, y) assert_equal(clf.n_classes_, 2) assert_array_equal(clf.classes_, [-1, 1]) # Classification, multi-output _y = np.vstack((y, np.array(y) * 2)).T clf = TreeClassifier(random_state=0) clf.fit(X, _y) assert_equal(len(clf.n_classes_), 2) assert_equal(len(clf.classes_), 2) assert_array_equal(clf.n_classes_, [2, 2]) assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]]) def test_unbalanced_iris(): # Check class rebalancing. unbalanced_X = iris.data[:125] unbalanced_y = iris.target[:125] sample_weight = _balance_weights(unbalanced_y) for name, TreeClassifier in CLF_TREES.items(): clf = TreeClassifier(random_state=0) clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight) assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y) def test_memory_layout(): # Check that it works no matter the memory layout for (name, TreeEstimator), dtype in product(ALL_TREES.items(), [np.float64, np.float32]): est = TreeEstimator(random_state=0) # Nothing X = np.asarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # C-order X = np.asarray(iris.data, order="C", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # F-order X = np.asarray(iris.data, order="F", dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Contiguous X = np.ascontiguousarray(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) if est.splitter in SPARSE_SPLITTERS: # csr matrix X = csr_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # csc_matrix X = csc_matrix(iris.data, dtype=dtype) y = iris.target assert_array_equal(est.fit(X, y).predict(X), y) # Strided X = np.asarray(iris.data[::3], dtype=dtype) y = iris.target[::3] assert_array_equal(est.fit(X, y).predict(X), y) def test_sample_weight(): # Check sample weighting. 
# Test that zero-weighted samples are not taken into account X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 sample_weight = np.ones(100) sample_weight[y == 0] = 0.0 clf = DecisionTreeClassifier(random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_array_equal(clf.predict(X), np.ones(100)) # Test that low weighted samples are not taken into account at low depth X = np.arange(200)[:, np.newaxis] y = np.zeros(200) y[50:100] = 1 y[100:200] = 2 X[100:200, 0] = 200 sample_weight = np.ones(200) sample_weight[y == 2] = .51 # Samples of class '2' are still weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 149.5) sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier clf = DecisionTreeClassifier(max_depth=1, random_state=0) clf.fit(X, y, sample_weight=sample_weight) assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved # Test that sample weighting is the same as having duplicates X = iris.data y = iris.target duplicates = rng.randint(0, X.shape[0], 100) clf = DecisionTreeClassifier(random_state=1) clf.fit(X[duplicates], y[duplicates]) sample_weight = np.bincount(duplicates, minlength=X.shape[0]) clf2 = DecisionTreeClassifier(random_state=1) clf2.fit(X, y, sample_weight=sample_weight) internal = clf.tree_.children_left != tree._tree.TREE_LEAF assert_array_almost_equal(clf.tree_.threshold[internal], clf2.tree_.threshold[internal]) def test_sample_weight_invalid(): # Check that invalid sample weighting raises errors. X = np.arange(100)[:, np.newaxis] y = np.ones(100) y[:50] = 0.0 clf = DecisionTreeClassifier(random_state=0) sample_weight = np.random.rand(100, 1) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.array(0) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(101) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) sample_weight = np.ones(99) assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight) def check_class_weights(name): """Check class_weights resemble sample_weights behavior.""" TreeClassifier = CLF_TREES[name] # Iris is balanced, so no effect expected for using 'balanced' weights clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target) clf2 = TreeClassifier(class_weight='balanced', random_state=0) clf2.fit(iris.data, iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Make a multi-output problem with three copies of Iris iris_multi = np.vstack((iris.target, iris.target, iris.target)).T # Create user-defined weights that should balance over the outputs clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.}, {0: 2., 1: 1., 2: 2.}, {0: 1., 1: 2., 2: 2.}], random_state=0) clf3.fit(iris.data, iris_multi) assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_) # Check against multi-output "balanced" which should also have no effect clf4 = TreeClassifier(class_weight='balanced', random_state=0) clf4.fit(iris.data, iris_multi) assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_) # Inflate importance of class 1, check against user-defined weights sample_weight = np.ones(iris.target.shape) sample_weight[iris.target == 1] *= 100 class_weight = {0: 1., 1: 100., 2: 1.} clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data,
iris.target) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) # Check that sample_weight and class_weight are multiplicative clf1 = TreeClassifier(random_state=0) clf1.fit(iris.data, iris.target, sample_weight ** 2) clf2 = TreeClassifier(class_weight=class_weight, random_state=0) clf2.fit(iris.data, iris.target, sample_weight) assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_) def test_class_weights(): for name in CLF_TREES: yield check_class_weights, name def check_class_weight_errors(name): # Test if class_weight raises errors and warnings when expected. TreeClassifier = CLF_TREES[name] _y = np.vstack((y, np.array(y) * 2)).T # Invalid preset string clf = TreeClassifier(class_weight='the larch', random_state=0) assert_raises(ValueError, clf.fit, X, y) assert_raises(ValueError, clf.fit, X, _y) # Not a list or preset for multi-output clf = TreeClassifier(class_weight=1, random_state=0) assert_raises(ValueError, clf.fit, X, _y) # Incorrect length list for multi-output clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0) assert_raises(ValueError, clf.fit, X, _y) def test_class_weight_errors(): for name in CLF_TREES: yield check_class_weight_errors, name def test_max_leaf_nodes(): # Test greedy trees with max_depth + 1 leaves. from sklearn.tree._tree import TREE_LEAF X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y) tree = est.tree_ assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1) # max_leaf_nodes in (0, 1) should raise ValueError est = TreeEstimator(max_depth=None, max_leaf_nodes=0) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=1) assert_raises(ValueError, est.fit, X, y) est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1) assert_raises(ValueError, est.fit, X, y) def test_max_leaf_nodes_max_depth(): # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1) k = 4 for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y) tree = est.tree_ assert_greater(tree.max_depth, 1) def test_arrays_persist(): # Ensure property arrays' memory stays alive when tree disappears # non-regression for #2726 for attr in ['n_classes', 'value', 'children_left', 'children_right', 'threshold', 'impurity', 'feature', 'n_node_samples']: value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr) # if pointing to freed memory, contents may be arbitrary assert_true(-2 <= value.flat[0] < 2, 'Array points to arbitrary memory') def test_only_constant_features(): random_state = check_random_state(0) X = np.zeros((10, 20)) y = random_state.randint(0, 2, (10, )) for name, TreeEstimator in ALL_TREES.items(): est = TreeEstimator(random_state=0) est.fit(X, y) assert_equal(est.tree_.max_depth, 0) def test_with_only_one_non_constant_features(): X = np.hstack([np.array([[1.], [1.], [0.], [0.]]), np.zeros((4, 1000))]) y = np.array([0., 1., 0., 1.0]) for name, TreeEstimator in CLF_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2))) for name, TreeEstimator in REG_TREES.items(): est = TreeEstimator(random_state=0, max_features=1) est.fit(X, y) assert_equal(est.tree_.max_depth, 1) assert_array_equal(est.predict(X), 0.5 * np.ones((4, ))) def test_big_input(): # Test that the error message for too large inputs is appropriate. X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1) clf = DecisionTreeClassifier() try: clf.fit(X, [0, 1, 0, 1]) except ValueError as e: assert_in("float32", str(e)) def test_realloc(): from sklearn.tree._utils import _realloc_test assert_raises(MemoryError, _realloc_test) def test_huge_allocations(): n_bits = int(platform.architecture()[0].rstrip('bit')) X = np.random.randn(10, 2) y = np.random.randint(0, 2, 10) # Sanity check: we cannot request more memory than the size of the address # space. Currently raises OverflowError. huge = 2 ** (n_bits + 1) clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(Exception, clf.fit, X, y) # Non-regression test: MemoryError used to be dropped by Cython # because of missing "except *".
huge = 2 ** (n_bits - 1) - 1 clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge) assert_raises(MemoryError, clf.fit, X, y) def check_sparse_input(tree, dataset, max_depth=None): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Gain testing time if dataset in ["digits", "boston"]: n_samples = X.shape[0] // 5 X = X[:n_samples] X_sparse = X_sparse[:n_samples] y = y[:n_samples] for sparse_format in (csr_matrix, csc_matrix, coo_matrix): X_sparse = sparse_format(X_sparse) # Check the default (depth first search) d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) y_pred = d.predict(X) if tree in CLF_TREES: y_proba = d.predict_proba(X) y_log_proba = d.predict_log_proba(X) for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix): X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32) assert_array_almost_equal(s.predict(X_sparse_test), y_pred) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X_sparse_test), y_proba) assert_array_almost_equal(s.predict_log_proba(X_sparse_test), y_log_proba) def test_sparse_input(): for tree, dataset in product(SPARSE_TREES, ("clf_small", "toy", "digits", "multilabel", "sparse-pos", "sparse-neg", "sparse-mix", "zeros")): max_depth = 3 if dataset == "digits" else None yield (check_sparse_input, tree, dataset, max_depth) # Due to numerical instability of MSE and too strict test, we limit the # maximal depth for tree, dataset in product(REG_TREES, ["boston", "reg_small"]): if tree in SPARSE_TREES: yield (check_sparse_input, tree, dataset, 2) def check_sparse_parameters(tree, dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check max_features d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_split d = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X, y) s = TreeEstimator(random_state=0, max_features=1, min_samples_split=10).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check min_samples_leaf d = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y) s = TreeEstimator(random_state=0, min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) # Check best-first search d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y) s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_parameters(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_parameters, tree, dataset) def check_sparse_criterion(tree, 
dataset): TreeEstimator = ALL_TREES[tree] X = DATASETS[dataset]["X"] X_sparse = DATASETS[dataset]["X_sparse"] y = DATASETS[dataset]["y"] # Check the various criteria CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS for criterion in CRITERIONS: d = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X, y) s = TreeEstimator(random_state=0, max_depth=3, criterion=criterion).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) assert_array_almost_equal(s.predict(X), d.predict(X)) def test_sparse_criterion(): for tree, dataset in product(SPARSE_TREES, ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]): yield (check_sparse_criterion, tree, dataset) def check_explicit_sparse_zeros(tree, max_depth=3, n_features=10): TreeEstimator = ALL_TREES[tree] # n_samples is set to n_features to ease the simultaneous # construction of the csr and csc matrices n_samples = n_features samples = np.arange(n_samples) # Generate X, y random_state = check_random_state(0) indices = [] data = [] offset = 0 indptr = [offset] for i in range(n_features): n_nonzero_i = random_state.binomial(n_samples, 0.5) indices_i = random_state.permutation(samples)[:n_nonzero_i] indices.append(indices_i) data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1 data.append(data_i) offset += n_nonzero_i indptr.append(offset) indices = np.concatenate(indices) data = np.array(np.concatenate(data), dtype=np.float32) X_sparse = csc_matrix((data, indices, indptr), shape=(n_samples, n_features)) X = X_sparse.toarray() X_sparse_test = csr_matrix((data, indices, indptr), shape=(n_samples, n_features)) X_test = X_sparse_test.toarray() y = random_state.randint(0, 3, size=(n_samples, )) # Ensure that X_sparse_test owns its data, indices and indptr array X_sparse_test = X_sparse_test.copy() # Ensure that we have explicit zeros assert_greater((X_sparse.data == 0.).sum(), 0) assert_greater((X_sparse_test.data == 0.).sum(), 0) # Perform the comparison d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y) s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y) assert_tree_equal(d.tree_, s.tree_, "{0} with dense and sparse format gave different " "trees".format(tree)) Xs = (X_test, X_sparse_test) for X1, X2 in product(Xs, Xs): assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2)) assert_array_almost_equal(s.apply(X1), d.apply(X2)) assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1)) assert_array_almost_equal(s.predict(X1), d.predict(X2)) if tree in CLF_TREES: assert_array_almost_equal(s.predict_proba(X1), d.predict_proba(X2)) def test_explicit_sparse_zeros(): for tree in SPARSE_TREES: yield (check_explicit_sparse_zeros, tree) @ignore_warnings def check_raise_error_on_1d_input(name): TreeEstimator = ALL_TREES[name] X = iris.data[:, 0].ravel() X_2d = iris.data[:, 0].reshape((-1, 1)) y = iris.target assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y) est = TreeEstimator(random_state=0) est.fit(X_2d, y) assert_raises(ValueError, est.predict, [X]) @ignore_warnings def test_1d_input(): for name in ALL_TREES: yield check_raise_error_on_1d_input, name def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight): # Private function to keep pretty printing in nose yielded tests est = TreeEstimator(random_state=0) est.fit(X, y, sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 1) est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4) est.fit(X, y,
sample_weight=sample_weight) assert_equal(est.tree_.max_depth, 0) def check_min_weight_leaf_split_level(name): TreeEstimator = ALL_TREES[name] X = np.array([[0], [0], [0], [0], [1]]) y = [0, 0, 0, 0, 1] sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2] _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight) if TreeEstimator().splitter in SPARSE_SPLITTERS: _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y, sample_weight) def test_min_weight_leaf_split_level(): for name in ALL_TREES: yield check_min_weight_leaf_split_level, name def check_public_apply(name): X_small32 = X_small.astype(tree._tree.DTYPE) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def check_public_apply_sparse(name): X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE)) est = ALL_TREES[name]() est.fit(X_small, y_small) assert_array_equal(est.apply(X_small), est.tree_.apply(X_small32)) def test_public_apply(): for name in ALL_TREES: yield (check_public_apply, name) for name in SPARSE_TREES: yield (check_public_apply_sparse, name)
bsd-3-clause
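A minimal sketch of the dense-versus-sparse equivalence the tree tests above exercise: the same estimator, fit once on dense input and once on the CSR version of the same data, should predict identically (the toy data here is illustrative, not from the record):

import numpy as np
from scipy.sparse import csr_matrix
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

iris = load_iris()
X, y = iris.data, iris.target

# Fit on dense input and on the CSR version of the same matrix.
dense = DecisionTreeClassifier(random_state=0).fit(X, y)
sparse = DecisionTreeClassifier(random_state=0).fit(csr_matrix(X), y)

# The learned trees should agree on every sample.
assert np.array_equal(dense.predict(X), sparse.predict(csr_matrix(X)))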
kaczla/PJN
src/Przecinki/scikit.py
1
1048
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import sys

import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import cross_val_predict
from sklearn import linear_model

# Features: sentence length in words; target: number of commas.
X = []
Y = []
for line in sys.stdin:
    line = line.rstrip()
    X.append([len(line.split())])
    Y.append(line.count(","))

lr = linear_model.LinearRegression()
predicted = cross_val_predict(lr, X, Y)

FILE = open(sys.argv[1], "r")
X_TEST = []
Y_TEST = []
for line in FILE:
    line = line.rstrip()
    Y_TEST.append(line.count(","))
    line = line.replace(",", "")
    X_TEST.append([len(line.split())])

regr = linear_model.LinearRegression()
regr.fit(X, Y)

print "Coefficients: ", regr.coef_
print "Residual sum of squares: %.2f" % np.mean((regr.predict(X_TEST) - Y_TEST) ** 2)
print "Variance score: %.2f" % regr.score(X_TEST, Y_TEST)

plt.scatter(X_TEST, Y_TEST, color='black')
plt.plot(X_TEST, regr.predict(X_TEST), color='green', linewidth=2)
plt.xticks(())
plt.yticks(())
plt.show()
gpl-2.0
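The script above evaluates its fit by hand; a sketch of the same evaluation routed through sklearn.metrics instead (X, Y, X_TEST, Y_TEST as built by the script; the metric functions are standard sklearn API):

from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

regr = LinearRegression()
regr.fit(X, Y)                      # word counts -> comma counts
Y_HAT = regr.predict(X_TEST)

print "MSE: %.2f" % mean_squared_error(Y_TEST, Y_HAT)
print "R^2: %.2f" % r2_score(Y_TEST, Y_HAT)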
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/neighbors/base.py
1
24541
"""Base and mixin classes for nearest neighbors""" # Authors: Jake Vanderplas <vanderplas@astro.washington.edu> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl> # Multi-output support by Arnaud Joly <a.joly@ulg.ac.be> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import warnings from abc import ABCMeta, abstractmethod import numpy as np from scipy.sparse import csr_matrix, issparse from .ball_tree import BallTree from .kd_tree import KDTree from ..base import BaseEstimator from ..metrics import pairwise_distances from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS from ..utils import safe_asarray, atleast2d_or_csr, check_arrays from ..utils.fixes import argpartition from ..utils.validation import DataConversionWarning from ..externals import six VALID_METRICS = dict(ball_tree=BallTree.valid_metrics, kd_tree=KDTree.valid_metrics, # The following list comes from the # sklearn.metrics.pairwise doc string brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) + ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule', 'wminkowski'])) VALID_METRICS_SPARSE = dict(ball_tree=[], kd_tree=[], brute=PAIRWISE_DISTANCE_FUNCTIONS.keys()) class NeighborsWarning(UserWarning): pass # Make sure that NeighborsWarning are displayed more than once warnings.simplefilter("always", NeighborsWarning) def _check_weights(weights): """Check to make sure weights are valid""" if weights in (None, 'uniform', 'distance'): return weights elif callable(weights): return weights else: raise ValueError("weights not recognized: should be 'uniform', " "'distance', or a callable function") def _get_weights(dist, weights): """Get the weights from an array of distances and a parameter ``weights`` Parameters =========== dist: ndarray The input distances weights: {'uniform', 'distance' or a callable} The kind of weighting used Returns ======== weights_arr: array of the same shape as ``dist`` if ``weights == 'uniform'``, then returns None """ if weights in (None, 'uniform'): return None elif weights == 'distance': with np.errstate(divide='ignore'): dist = 1. / dist return dist elif callable(weights): return weights(dist) else: raise ValueError("weights not recognized: should be 'uniform', " "'distance', or a callable function") class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)): """Base class for nearest neighbors estimators.""" @abstractmethod def __init__(self): pass def _init_params(self, n_neighbors=None, radius=None, algorithm='auto', leaf_size=30, metric='minkowski', p=2, metric_params=None, **kwargs): if kwargs: warnings.warn("Passing additional arguments to the metric " "function as **kwargs is deprecated " "and will no longer be supported in 0.18. 
" "Use metric_params instead.", DeprecationWarning, stacklevel=3) if metric_params is None: metric_params = {} metric_params.update(kwargs) self.n_neighbors = n_neighbors self.radius = radius self.algorithm = algorithm self.leaf_size = leaf_size self.metric = metric self.metric_params = metric_params self.p = p if algorithm not in ['auto', 'brute', 'kd_tree', 'ball_tree']: raise ValueError("unrecognized algorithm: '%s'" % algorithm) if algorithm == 'auto': alg_check = 'ball_tree' else: alg_check = algorithm if callable(metric): if algorithm == 'kd_tree': # callable metric is only valid for brute force and ball_tree raise ValueError( "kd_tree algorithm does not support callable metric '%s'" % metric) elif metric not in VALID_METRICS[alg_check]: raise ValueError("Metric '%s' not valid for algorithm '%s'" % (metric, algorithm)) if self.metric_params is not None and 'p' in self.metric_params: warnings.warn("Parameter p is found in metric_params. " "The corresponding parameter from __init__ " "is ignored.", SyntaxWarning, stacklevel=3) effective_p = metric_params['p'] else: effective_p = self.p if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1: raise ValueError("p must be greater than one for minkowski metric") self._fit_X = None self._tree = None self._fit_method = None def _fit(self, X): if self.metric_params is None: self.effective_metric_params_ = {} else: self.effective_metric_params_ = self.metric_params.copy() effective_p = self.effective_metric_params_.get('p', self.p) if self.metric in ['wminkowski', 'minkowski']: self.effective_metric_params_['p'] = effective_p self.effective_metric_ = self.metric # For minkowski distance, use more efficient methods where available if self.metric == 'minkowski': p = self.effective_metric_params_.pop('p', 2) if p < 1: raise ValueError("p must be greater than one " "for minkowski metric") elif p == 1: self.effective_metric_ = 'manhattan' elif p == 2: self.effective_metric_ = 'euclidean' elif p == np.inf: self.effective_metric_ = 'chebyshev' else: self.effective_metric_params_['p'] = p if isinstance(X, NeighborsBase): self._fit_X = X._fit_X self._tree = X._tree self._fit_method = X._fit_method return self elif isinstance(X, BallTree): self._fit_X = X.data self._tree = X self._fit_method = 'ball_tree' return self elif isinstance(X, KDTree): self._fit_X = X.data self._tree = X self._fit_method = 'kd_tree' return self X = atleast2d_or_csr(X, copy=False) n_samples = X.shape[0] if n_samples == 0: raise ValueError("n_samples must be greater than 0") if issparse(X): if self.algorithm not in ('auto', 'brute'): warnings.warn("cannot use tree with sparse input: " "using brute force") if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']: raise ValueError("metric '%s' not valid for sparse input" % self.effective_metric_) self._fit_X = X.copy() self._tree = None self._fit_method = 'brute' return self self._fit_method = self.algorithm self._fit_X = X if self._fit_method == 'auto': # A tree approach is better for small number of neighbors, # and KDTree is generally faster when available if (self.n_neighbors is None or self.n_neighbors < self._fit_X.shape[0] // 2): if self.effective_metric_ in VALID_METRICS['kd_tree']: self._fit_method = 'kd_tree' else: self._fit_method = 'ball_tree' else: self._fit_method = 'brute' if self._fit_method == 'ball_tree': self._tree = BallTree(X, self.leaf_size, metric=self.effective_metric_, **self.effective_metric_params_) elif self._fit_method == 'kd_tree': self._tree = KDTree(X, self.leaf_size, 
metric=self.effective_metric_, **self.effective_metric_params_) elif self._fit_method == 'brute': self._tree = None else: raise ValueError("algorithm = '%s' not recognized" % self.algorithm) return self class KNeighborsMixin(object): """Mixin for k-neighbors searches""" def kneighbors(self, X, n_neighbors=None, return_distance=True): """Finds the K-neighbors of a point. Returns distance Parameters ---------- X : array-like, last dimension same as that of fit data The new point. n_neighbors : int Number of neighbors to get (default is the value passed to the constructor). return_distance : boolean, optional. Defaults to True. If False, distances will not be returned Returns ------- dist : array Array representing the lengths to point, only present if return_distance=True ind : array Indices of the nearest points in the population matrix. Examples -------- In the following example, we construct a NeighborsClassifier class from an array representing our data set and ask who's the closest point to [1,1,1] >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=1) >>> neigh.fit(samples) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS (array([[ 0.5]]), array([[2]]...)) As you can see, it returns [[0.5]], and [[2]], which means that the element is at distance 0.5 and is the third element of samples (indexes start at 0). You can also query for multiple points: >>> X = [[0., 1., 0.], [1., 0., 1.]] >>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS array([[1], [2]]...) """ if self._fit_method is None: raise ValueError("must fit neighbors before querying") X = atleast2d_or_csr(X) if n_neighbors is None: n_neighbors = self.n_neighbors if self._fit_method == 'brute': # for efficiency, use squared euclidean distances if self.effective_metric_ == 'euclidean': dist = pairwise_distances(X, self._fit_X, 'euclidean', squared=True) else: dist = pairwise_distances(X, self._fit_X, self.effective_metric_, **self.effective_metric_params_) neigh_ind = argpartition(dist, n_neighbors - 1, axis=1) neigh_ind = neigh_ind[:, :n_neighbors] # argpartition doesn't guarantee sorted order, so we sort again j = np.arange(neigh_ind.shape[0])[:, None] neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])] if return_distance: if self.effective_metric_ == 'euclidean': return np.sqrt(dist[j, neigh_ind]), neigh_ind else: return dist[j, neigh_ind], neigh_ind else: return neigh_ind elif self._fit_method in ['ball_tree', 'kd_tree']: result = self._tree.query(X, n_neighbors, return_distance=return_distance) return result else: raise ValueError("internal: _fit_method not recognized") def kneighbors_graph(self, X, n_neighbors=None, mode='connectivity'): """Computes the (weighted) graph of k-Neighbors for points in X Parameters ---------- X : array-like, shape = [n_samples, n_features] Sample data n_neighbors : int Number of neighbors for each sample. (default is value passed to the constructor). mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit] n_samples_fit is the number of samples in the fitted data A[i, j] is assigned the weight of edge that connects i to j. 
Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(n_neighbors=2) >>> neigh.fit(X) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> A = neigh.kneighbors_graph(X) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 1.], [ 1., 0., 1.]]) See also -------- NearestNeighbors.radius_neighbors_graph """ X = safe_asarray(X) if n_neighbors is None: n_neighbors = self.n_neighbors n_samples1 = X.shape[0] n_samples2 = self._fit_X.shape[0] n_nonzero = n_samples1 * n_neighbors A_indptr = np.arange(0, n_nonzero + 1, n_neighbors) # construct CSR matrix representation of the k-NN graph if mode == 'connectivity': A_data = np.ones((n_samples1, n_neighbors)) A_ind = self.kneighbors(X, n_neighbors, return_distance=False) elif mode == 'distance': data, ind = self.kneighbors(X, n_neighbors + 1, return_distance=True) A_data, A_ind = data[:, 1:], ind[:, 1:] else: raise ValueError( 'Unsupported mode, must be one of "connectivity" ' 'or "distance" but got "%s" instead' % mode) return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr), shape=(n_samples1, n_samples2)) class RadiusNeighborsMixin(object): """Mixin for radius-based neighbors searches""" def radius_neighbors(self, X, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Returns indices of and distances to the neighbors of each point. Parameters ---------- X : array-like, last dimension same as that of fit data The new point or points radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional. Defaults to True. If False, distances will not be returned Returns ------- dist : array Array representing the euclidean distances to each point, only present if return_distance=True. ind : array Indices of the nearest points in the population matrix. Examples -------- In the following example, we construct a NeighborsClassifier class from an array representing our data set and ask who's the closest point to [1,1,1] >>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(radius=1.6) >>> neigh.fit(samples) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS (array([[ 1.5, 0.5]]...), array([[1, 2]]...) The first array returned contains the distances to all points which are closer than 1.6, while the second array returned contains their indices. In general, multiple points can be queried at the same time. Notes ----- Because the number of neighbors of each point is not necessarily equal, the results for multiple query points cannot be fit in a standard data array. For efficiency, `radius_neighbors` returns arrays of objects, where each object is a 1D array of indices or distances. 
""" if self._fit_method is None: raise ValueError("must fit neighbors before querying") X = atleast2d_or_csr(X) if radius is None: radius = self.radius if self._fit_method == 'brute': # for efficiency, use squared euclidean distances if self.effective_metric_ == 'euclidean': dist = pairwise_distances(X, self._fit_X, 'euclidean', squared=True) radius *= radius else: dist = pairwise_distances(X, self._fit_X, self.effective_metric_, **self.effective_metric_params_) neigh_ind = [np.where(d < radius)[0] for d in dist] # if there are the same number of neighbors for each point, # we can do a normal array. Otherwise, we return an object # array with elements that are numpy arrays try: neigh_ind = np.asarray(neigh_ind, dtype=int) dtype_F = float except ValueError: neigh_ind = np.asarray(neigh_ind, dtype='object') dtype_F = object if return_distance: if self.effective_metric_ == 'euclidean': dist = np.array([np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)], dtype=dtype_F) else: dist = np.array([d[neigh_ind[i]] for i, d in enumerate(dist)], dtype=dtype_F) return dist, neigh_ind else: return neigh_ind elif self._fit_method in ['ball_tree', 'kd_tree']: results = self._tree.query_radius(X, radius, return_distance=return_distance) if return_distance: ind, dist = results return dist, ind else: return results else: raise ValueError("internal: _fit_method not recognized") def radius_neighbors_graph(self, X, radius=None, mode='connectivity'): """Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Parameters ---------- X : array-like, shape = [n_samples, n_features] Sample data radius : float Radius of neighborhoods. (default is the value passed to the constructor). mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import NearestNeighbors >>> neigh = NearestNeighbors(radius=1.5) >>> neigh.fit(X) # doctest: +ELLIPSIS NearestNeighbors(algorithm='auto', leaf_size=30, ...) >>> A = neigh.radius_neighbors_graph(X) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 0.], [ 1., 0., 1.]]) See also -------- kneighbors_graph """ X = safe_asarray(X) if radius is None: radius = self.radius n_samples1 = X.shape[0] n_samples2 = self._fit_X.shape[0] # construct CSR matrix representation of the NN graph if mode == 'connectivity': A_ind = self.radius_neighbors(X, radius, return_distance=False) A_data = None elif mode == 'distance': dist, A_ind = self.radius_neighbors(X, radius, return_distance=True) A_data = np.concatenate(list(dist)) else: raise ValueError( 'Unsupported mode, must be one of "connectivity", ' 'or "distance" but got %s instead' % mode) n_neighbors = np.array([len(a) for a in A_ind]) n_nonzero = np.sum(n_neighbors) if A_data is None: A_data = np.ones(n_nonzero) A_ind = np.concatenate(list(A_ind)) A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors))) return csr_matrix((A_data, A_ind, A_indptr), shape=(n_samples1, n_samples2)) class SupervisedFloatMixin(object): def fit(self, X, y): """Fit the model using X as training data and y as target values Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. 
If array or matrix, shape = [n_samples, n_features] y : {array-like, sparse matrix} Target values, array of float values, shape = [n_samples] or [n_samples, n_outputs] """ if not isinstance(X, (KDTree, BallTree)): X, y = check_arrays(X, y, sparse_format="csr") self._y = y return self._fit(X) class SupervisedIntegerMixin(object): def fit(self, X, y): """Fit the model using X as training data and y as target values Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. If array or matrix, shape = [n_samples, n_features] y : {array-like, sparse matrix} Target values of shape = [n_samples] or [n_samples, n_outputs] """ if not isinstance(X, (KDTree, BallTree)): X, y = check_arrays(X, y, sparse_format="csr") if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1: if y.ndim != 1: warnings.warn("A column-vector y was passed when a 1d array " "was expected. Please change the shape of y to " "(n_samples, ), for example using ravel().", DataConversionWarning, stacklevel=2) self.outputs_2d_ = False y = y.reshape((-1, 1)) else: self.outputs_2d_ = True self.classes_ = [] self._y = np.empty(y.shape, dtype=np.int) for k in range(self._y.shape[1]): classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True) self.classes_.append(classes) if not self.outputs_2d_: self.classes_ = self.classes_[0] self._y = self._y.ravel() return self._fit(X) class UnsupervisedMixin(object): def fit(self, X, y=None): """Fit the model using X as training data Parameters ---------- X : {array-like, sparse matrix, BallTree, KDTree} Training data. If array or matrix, shape = [n_samples, n_features] """ return self._fit(X)
apache-2.0
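kneighbors_graph above assembles its CSR matrix directly from the neighbor indices; with a fixed k per query row, the indptr array is just an arithmetic progression. A self-contained sketch of that layout (the neighbor indices here are assumed, not computed):

import numpy as np
from scipy.sparse import csr_matrix

n_queries, k, n_fit = 3, 2, 4
neigh_ind = np.array([[1, 2], [0, 3], [3, 1]])   # assumed kneighbors output

data = np.ones((n_queries, k))                   # 'connectivity' mode weights
indptr = np.arange(0, n_queries * k + 1, k)      # exactly k entries per row
A = csr_matrix((data.ravel(), neigh_ind.ravel(), indptr),
               shape=(n_queries, n_fit))
print(A.toarray())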
Vimos/scikit-learn
sklearn/ensemble/tests/test_partial_dependence.py
365
6996
""" Testing for the partial dependence module. """ import numpy as np from numpy.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import if_matplotlib from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the boston dataset boston = datasets.load_boston() # also load the iris dataset iris = datasets.load_iris() def test_partial_dependence_classifier(): # Test partial dependence for classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5) # only 4 grid points instead of 5 because only 4 unique X[:,0] vals assert pdp.shape == (1, 4) assert axes[0].shape[0] == 4 # now with our own grid X_ = np.asarray(X) grid = np.unique(X_[:, 0]) pdp_2, axes = partial_dependence(clf, [0], grid=grid) assert axes is None assert_array_equal(pdp, pdp_2) def test_partial_dependence_multiclass(): # Test partial dependence for multi-class classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 n_classes = clf.n_classes_ pdp, axes = partial_dependence( clf, [0], X=iris.data, grid_resolution=grid_resolution) assert pdp.shape == (n_classes, grid_resolution) assert len(axes) == 1 assert axes[0].shape[0] == grid_resolution def test_partial_dependence_regressor(): # Test partial dependence for regressor clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 pdp, axes = partial_dependence( clf, [0], X=boston.data, grid_resolution=grid_resolution) assert pdp.shape == (1, grid_resolution) assert axes[0].shape[0] == grid_resolution def test_partial_dependecy_input(): # Test input validation of partial dependence. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None) assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, partial_dependence, {}, [0], X=X) # Gradient boosting estimator must be fit assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), [0], X=X) assert_raises(ValueError, partial_dependence, clf, [-1], X=X) assert_raises(ValueError, partial_dependence, clf, [100], X=X) # wrong ndim for grid grid = np.random.rand(10, 2, 1) assert_raises(ValueError, partial_dependence, clf, [0], grid=grid) @if_matplotlib def test_plot_partial_dependence(): # Test partial dependence plot function. 
clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with str features and array feature names fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with list feature_names feature_names = boston.feature_names.tolist() fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) @if_matplotlib def test_plot_partial_dependence_input(): # Test partial dependence plot function input checks. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) # not fitted yet assert_raises(ValueError, plot_partial_dependence, clf, X, [0]) clf.fit(X, y) assert_raises(ValueError, plot_partial_dependence, clf, np.array(X)[:, :0], [0]) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, plot_partial_dependence, {}, X, [0]) # must be larger than -1 assert_raises(ValueError, plot_partial_dependence, clf, X, [-1]) # too large feature value assert_raises(ValueError, plot_partial_dependence, clf, X, [100]) # str feature but no feature_names assert_raises(ValueError, plot_partial_dependence, clf, X, ['foobar']) # not valid features value assert_raises(ValueError, plot_partial_dependence, clf, X, [{'foo': 'bar'}]) @if_matplotlib def test_plot_partial_dependence_multiclass(): # Test partial dependence plot function on multi-class input. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0, grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # now with symbol labels target = iris.target_names[iris.target] clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label='setosa', grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # label not in gbrt.classes_ assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], label='foobar', grid_resolution=grid_resolution) # label not provided assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], grid_resolution=grid_resolution)
bsd-3-clause
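A minimal sketch of the API these tests exercise, using the module's toy X and y (this assumes the era's signature, where target_variables and either grid or X select the features and grid points):

from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble.partial_dependence import partial_dependence

clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)

# Partial dependence of the decision function on feature 0.
pdp, axes = partial_dependence(clf, target_variables=[0], X=X,
                               grid_resolution=5)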
siutanwong/scikit-learn
examples/text/document_clustering.py
230
8356
""" ======================================= Clustering text documents using k-means ======================================= This is an example showing how the scikit-learn can be used to cluster documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features instead of standard numpy arrays. Two feature extraction methods can be used in this example: - TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most frequent words to features indices and hence compute a word occurrence frequency (sparse) matrix. The word frequencies are then reweighted using the Inverse Document Frequency (IDF) vector collected feature-wise over the corpus. - HashingVectorizer hashes word occurrences to a fixed dimensional space, possibly with collisions. The word count vectors are then normalized to each have l2-norm equal to one (projected to the euclidean unit-ball) which seems to be important for k-means to work in high dimensional space. HashingVectorizer does not provide IDF weighting as this is a stateless model (the fit method does nothing). When IDF weighting is needed it can be added by pipelining its output to a TfidfTransformer instance. Two algorithms are demoed: ordinary k-means and its more scalable cousin minibatch k-means. Additionally, latent sematic analysis can also be used to reduce dimensionality and discover latent patterns in the data. It can be noted that k-means (and minibatch k-means) are very sensitive to feature scaling and that in this case the IDF weighting helps improve the quality of the clustering by quite a lot as measured against the "ground truth" provided by the class label assignments of the 20 newsgroups dataset. This improvement is not visible in the Silhouette Coefficient which is small for both as this measure seem to suffer from the phenomenon called "Concentration of Measure" or "Curse of Dimensionality" for high dimensional datasets such as text data. Other measures such as V-measure and Adjusted Rand Index are information theoretic based evaluation scores: as they are only based on cluster assignments rather than distances, hence not affected by the curse of dimensionality. Note: as k-means is optimizing a non-convex objective function, it will likely end up in a local optimum. Several runs with independent random init might be necessary to get a good convergence. 
""" # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function from sklearn.datasets import fetch_20newsgroups from sklearn.decomposition import TruncatedSVD from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.pipeline import make_pipeline from sklearn.preprocessing import Normalizer from sklearn import metrics from sklearn.cluster import KMeans, MiniBatchKMeans import logging from optparse import OptionParser import sys from time import time import numpy as np # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--lsa", dest="n_components", type="int", help="Preprocess documents with latent semantic analysis.") op.add_option("--no-minibatch", action="store_false", dest="minibatch", default=True, help="Use ordinary k-means algorithm (in batch mode).") op.add_option("--no-idf", action="store_false", dest="use_idf", default=True, help="Disable Inverse Document Frequency feature weighting.") op.add_option("--use-hashing", action="store_true", default=False, help="Use a hashing feature vectorizer") op.add_option("--n-features", type=int, default=10000, help="Maximum number of features (dimensions)" " to extract from text.") op.add_option("--verbose", action="store_true", dest="verbose", default=False, help="Print progress reports inside k-means algorithm.") print(__doc__) op.print_help() (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) dataset = fetch_20newsgroups(subset='all', categories=categories, shuffle=True, random_state=42) print("%d documents" % len(dataset.data)) print("%d categories" % len(dataset.target_names)) print() labels = dataset.target true_k = np.unique(labels).shape[0] print("Extracting features from the training dataset using a sparse vectorizer") t0 = time() if opts.use_hashing: if opts.use_idf: # Perform an IDF normalization on the output of HashingVectorizer hasher = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=True, norm=None, binary=False) vectorizer = make_pipeline(hasher, TfidfTransformer()) else: vectorizer = HashingVectorizer(n_features=opts.n_features, stop_words='english', non_negative=False, norm='l2', binary=False) else: vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features, min_df=2, stop_words='english', use_idf=opts.use_idf) X = vectorizer.fit_transform(dataset.data) print("done in %fs" % (time() - t0)) print("n_samples: %d, n_features: %d" % X.shape) print() if opts.n_components: print("Performing dimensionality reduction using LSA") t0 = time() # Vectorizer results are normalized, which makes KMeans behave as # spherical k-means for better results. Since LSA/SVD results are # not normalized, we have to redo the normalization. 
svd = TruncatedSVD(opts.n_components) normalizer = Normalizer(copy=False) lsa = make_pipeline(svd, normalizer) X = lsa.fit_transform(X) print("done in %fs" % (time() - t0)) explained_variance = svd.explained_variance_ratio_.sum() print("Explained variance of the SVD step: {}%".format( int(explained_variance * 100))) print() ############################################################################### # Do the actual clustering if opts.minibatch: km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=opts.verbose) else: km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=opts.verbose) print("Clustering sparse data with %s" % km) t0 = time() km.fit(X) print("done in %0.3fs" % (time() - t0)) print() print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(labels, km.labels_)) print("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(X, km.labels_, sample_size=1000)) print() if not opts.use_hashing: print("Top terms per cluster:") if opts.n_components: original_space_centroids = svd.inverse_transform(km.cluster_centers_) order_centroids = original_space_centroids.argsort()[:, ::-1] else: order_centroids = km.cluster_centers_.argsort()[:, ::-1] terms = vectorizer.get_feature_names() for i in range(true_k): print("Cluster %d:" % i, end='') for ind in order_centroids[i, :10]: print(' %s' % terms[ind], end='') print()
bsd-3-clause
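The docstring's point about layering IDF weighting on top of the stateless HashingVectorizer, shown in isolation (toy corpus assumed; non_negative is the flag used in this era of sklearn):

from sklearn.feature_extraction.text import HashingVectorizer, TfidfTransformer
from sklearn.pipeline import make_pipeline

docs = ["the cat sat", "the dog sat", "cats chase dogs"]  # toy corpus

hasher = HashingVectorizer(n_features=2 ** 10, stop_words='english',
                           non_negative=True, norm=None)
vectorizer = make_pipeline(hasher, TfidfTransformer())
X = vectorizer.fit_transform(docs)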
COL-IU/XLSearch
xlsearch_train.py
1
5042
import sys
import pickle
import os
import getopt
from time import ctime

import numpy as np

usage = '''
USAGE: python xlsearch_train.py
        -l [path to xlsearch library]
        -p [parameter file]
        -o [output file]'''

(pairs, args) = getopt.getopt(sys.argv[1:], 'l:p:o:')
cmd_arg = dict()
for i in range(len(pairs)):
    cmd_arg[pairs[i][0]] = pairs[i][1]

if len(cmd_arg) != 3:
    print usage
    sys.exit(1)

lib_path = cmd_arg['-l']
param_file = cmd_arg['-p']
output_file = cmd_arg['-o']

sys.path.append(lib_path)
from utility import *
from index import EnumIndexBuilder
from fastareader import FastaReader

print 'XLSearch, version 1.0'
print 'Copyright of School of Informatics and Computing, Indiana University'
print 'Current time %s' % ctime()
print 'Training logistic regression models using authentic true-true PSMs...'

print '\nReading parameters from: %s...' % param_file
[param, mass] = read_param(param_file)
param['ntermxlink'] = False
param['neutral_loss']['h2o_loss']['aa'] = set('DEST')
param['neutral_loss']['nh3_loss']['aa'] = set('KNQR')
param['neutral_loss']['h2o_gain']['aa'] = set()
mass['C'] = 103.009184
print 'Reading parameters done!'

print '\nReading MSMS spectra files from directory: %s...' % param['ms_data']
spec_dict = read_spec(param['ms_data'], param, mass)
pickle.dump(spec_dict, file('spectra.pickle', 'w'))
print 'Total number of spectra: %d' % len(spec_dict)
print 'Reading MSMS spectra files done!'

print '\nDeisotoping MSMS spectra...'
spec_dict = pickle.load(file('spectra.pickle'))
deisotoped = dict()
titles = spec_dict.keys()
for i in range(len(titles)):
    title = titles[i]
    (one, align) = spec_dict[title].deisotope(mass, 4, 0.02)
    deisotoped[title] = one

pickle.dump(deisotoped, file('deisotoped.pickle', 'w'))
deisotoped = pickle.load(file('deisotoped.pickle'))
spec_dict = deisotoped
print 'Deisotoping MSMS spectra done!'
print 'Current time %s' % ctime()

print '\nBuilding index for all possible inter-peptide cross-links...'
index = EnumIndexBuilder(param['target_database'], spec_dict, mass, param)
pickle.dump(index, file('index.pickle', 'w'))
index = pickle.load(file('index.pickle'))
print 'Building index done!'
print 'Current time %s' % ctime()

print '\nComputing features for candidate PSMs for query spectra...'
results = []
titles = []
for title in index.search_index.keys():
    if len(index.search_index[title]) != 0:
        titles.append(title)

length = len(titles)
for i in range(0, length):
    print '%d / %d' % (i, length)
    sys.stdout.flush()
    title = titles[i]
    result = get_matches_per_spec(mass, param, index, title)
    result = [title, result]
    results.append(result)
print 'Computing features done!\n'
print 'Current time: %s' % ctime()
pickle.dump(results, file('results.pickle', 'w'))
results = pickle.load(file('results.pickle'))

print 'Extracting authentic true-true PSMs...'
true_true = get_true_true(results, index, param, mass)
pickle.dump(true_true, file('TT.pickle', 'w'))
print 'Extracting authentic true-true PSMs done!'

print 'Extracting true-false PSMs based on true-true PSMs as seeds...'
true_false = get_true_false(true_true, param, mass)
pickle.dump(true_false, file('TF.pickle', 'w'))
print 'Extracting true-false PSMs done!'

print 'Extracting false-false PSMs based on true-true PSMs as seeds...'
false_false = get_false_false(true_true, param, mass)
pickle.dump(false_false, file('FF.pickle', 'w'))
print 'Extracting false-false PSMs done!'

print 'Computing feature matrix for true-true, true-false, false-false PSMs...'
X_true_true = get_feature_matrix(true_true)
X_true_false = get_feature_matrix(true_false)
X_false_false = get_feature_matrix(false_false)

X_TT_TF = np.concatenate((X_true_true, X_true_false), axis=0)
y_TT_TF = []
y_TT_TF.extend([1.0] * len(true_true))
y_TT_TF.extend([0.0] * len(true_false))
y_TT_TF = np.asarray(y_TT_TF)
y_TT_TF = y_TT_TF.T

X_TF_FF = np.concatenate((X_true_false, X_false_false), axis=0)
y_TF_FF = []
y_TF_FF.extend([1.0] * len(true_false))
y_TF_FF.extend([0.0] * len(false_false))
y_TF_FF = np.asarray(y_TF_FF)
y_TF_FF = y_TF_FF.T
print 'Computing features done!'

from sklearn import linear_model

log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TT_TF, y_TT_TF)
model_TT_TF = []
model_TT_TF.extend(log_reg.intercept_.tolist())
# coef_ is 2-D (1 x n_features); flatten it so the entries written below
# are floats rather than a nested list
model_TT_TF.extend(log_reg.coef_.ravel().tolist())

log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TF_FF, y_TF_FF)
model_TF_FF = []
model_TF_FF.extend(log_reg.intercept_.tolist())
model_TF_FF.extend(log_reg.coef_.ravel().tolist())

f = open(output_file, 'w')
f.write('# Classifier I (TT-TF) coefficients\n')
for i in range(len(model_TT_TF)):
    f.write('CI%02d\t' % i)
    f.write('%.60f\n' % model_TT_TF[i])

f.write('# Classifier II (TF-FF) coefficients\n')
for i in range(len(model_TF_FF)):
    f.write('CII%02d\t' % i)
    f.write('%.60f\n' % model_TF_FF[i])

f.write('nTT\t%d\n' % len(true_true))
f.write('nTF\t%d\n' % len(true_false))
f.write('nFF\t%d\n' % len(false_false))
f.close()

print 'XLSearch train mode finished!'
mit
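The flattening fixed above, shown on its own: LogisticRegression stores intercept_ as a 1-D array and coef_ as a 2-D array, so both must be unrolled before writing one scalar per line (toy data assumed):

import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0., 0.], [0., 1.], [1., 0.], [1., 1.]])
y = np.array([0., 1., 1., 1.])

log_reg = LogisticRegression().fit(X, y)
# intercept_ first, then the flattened feature weights.
model = log_reg.intercept_.tolist() + log_reg.coef_.ravel().tolist()
for i, c in enumerate(model):
    print 'C%02d\t%.6f' % (i, c)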
meduz/scikit-learn
examples/linear_model/plot_lasso_lars.py
363
1080
#!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================

Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.

"""
print(__doc__)

# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
#         Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model
from sklearn import datasets

diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target

print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)

xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]

plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
bsd-3-clause
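The plotted quantities above, without matplotlib: lars_path returns one coefficient column per step, and xx rescales the l1 norm of each column to [0, 1] (same diabetes data, just inspecting shapes):

import numpy as np
from sklearn import datasets, linear_model

diabetes = datasets.load_diabetes()
alphas, _, coefs = linear_model.lars_path(diabetes.data, diabetes.target,
                                          method='lasso')
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]          # 0 at the empty model, 1 at the end of the path
print(coefs.shape, xx.min(), xx.max())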
PrashntS/scikit-learn
examples/decomposition/plot_faces_decomposition.py
103
4394
""" ============================ Faces dataset decompositions ============================ This example applies to :ref:`olivetti_faces` different unsupervised matrix decomposition (dimension reduction) methods from the module :py:mod:`sklearn.decomposition` (see the documentation chapter :ref:`decompositions`) . """ print(__doc__) # Authors: Vlad Niculae, Alexandre Gramfort # License: BSD 3 clause import logging from time import time from numpy.random import RandomState import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.cluster import MiniBatchKMeans from sklearn import decomposition # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) rng = RandomState(0) ############################################################################### # Load faces data dataset = fetch_olivetti_faces(shuffle=True, random_state=rng) faces = dataset.data n_samples, n_features = faces.shape # global centering faces_centered = faces - faces.mean(axis=0) # local centering faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) print("Dataset consists of %d faces" % n_samples) ############################################################################### def plot_gallery(title, images, n_col=n_col, n_row=n_row): plt.figure(figsize=(2. * n_col, 2.26 * n_row)) plt.suptitle(title, size=16) for i, comp in enumerate(images): plt.subplot(n_row, n_col, i + 1) vmax = max(comp.max(), -comp.min()) plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray, interpolation='nearest', vmin=-vmax, vmax=vmax) plt.xticks(()) plt.yticks(()) plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.) ############################################################################### # List of the different estimators, whether to center and transpose the # problem, and whether the transformer uses the clustering API. estimators = [ ('Eigenfaces - RandomizedPCA', decomposition.RandomizedPCA(n_components=n_components, whiten=True), True), ('Non-negative components - NMF', decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3), False), ('Independent components - FastICA', decomposition.FastICA(n_components=n_components, whiten=True), True), ('Sparse comp. - MiniBatchSparsePCA', decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8, n_iter=100, batch_size=3, random_state=rng), True), ('MiniBatchDictionaryLearning', decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1, n_iter=50, batch_size=3, random_state=rng), True), ('Cluster centers - MiniBatchKMeans', MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20, max_iter=50, random_state=rng), True), ('Factor Analysis components - FA', decomposition.FactorAnalysis(n_components=n_components, max_iter=2), True), ] ############################################################################### # Plot a sample of the input data plot_gallery("First centered Olivetti faces", faces_centered[:n_components]) ############################################################################### # Do the estimation and plot it for name, estimator, center in estimators: print("Extracting the top %d %s..." 
% (n_components, name)) t0 = time() data = faces if center: data = faces_centered estimator.fit(data) train_time = (time() - t0) print("done in %0.3fs" % train_time) if hasattr(estimator, 'cluster_centers_'): components_ = estimator.cluster_centers_ else: components_ = estimator.components_ if hasattr(estimator, 'noise_variance_'): plot_gallery("Pixelwise variance", estimator.noise_variance_.reshape(1, -1), n_col=1, n_row=1) plot_gallery('%s - Train time %.1fs' % (name, train_time), components_[:n_components]) plt.show()
bsd-3-clause
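The attribute dispatch the estimation loop above relies on, isolated as a helper (the helper name is illustrative): clustering estimators expose cluster_centers_ while decomposition estimators expose components_.

def extract_components(estimator):
    # MiniBatchKMeans keeps its atoms in cluster_centers_;
    # PCA/NMF/ICA-style estimators keep theirs in components_.
    if hasattr(estimator, 'cluster_centers_'):
        return estimator.cluster_centers_
    return estimator.components_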
vermouthmjl/scikit-learn
sklearn/metrics/classification.py
1
69294
"""Metrics to assess performance on classification task given class prediction Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # Jatin Shah <jatindshah@gmail.com> # Saurabh Jha <saurabh.jhaa@gmail.com> # Bernardo Stein <bernardovstein@gmail.com> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from ..preprocessing import LabelBinarizer, label_binarize from ..preprocessing import LabelEncoder from ..utils import check_array from ..utils import check_consistent_length from ..utils import column_or_1d from ..utils.multiclass import unique_labels from ..utils.multiclass import type_of_target from ..utils.validation import _num_samples from ..utils.sparsefuncs import count_nonzero from ..utils.fixes import bincount from ..exceptions import UndefinedMetricWarning def _check_targets(y_true, y_pred): """Check that y_true and y_pred belong to the same classification task This converts multiclass or binary types to a common shape, and raises a ValueError for a mix of multilabel and multiclass targets, a mix of multilabel formats, for the presence of continuous-valued or multioutput targets, or for targets of different lengths. Column vectors are squeezed to 1d, while multilabel formats are returned as CSR sparse label indicators. Parameters ---------- y_true : array-like y_pred : array-like Returns ------- type_true : one of {'multilabel-indicator', 'multiclass', 'binary'} The type of the true target data, as output by ``utils.multiclass.type_of_target`` y_true : array or indicator matrix y_pred : array or indicator matrix """ check_consistent_length(y_true, y_pred) type_true = type_of_target(y_true) type_pred = type_of_target(y_pred) y_type = set([type_true, type_pred]) if y_type == set(["binary", "multiclass"]): y_type = set(["multiclass"]) if len(y_type) > 1: raise ValueError("Can't handle mix of {0} and {1}" "".format(type_true, type_pred)) # We can't have more than one value on y_type => The set is no more needed y_type = y_type.pop() # No metrics support "multiclass-multioutput" format if (y_type not in ["binary", "multiclass", "multilabel-indicator"]): raise ValueError("{0} is not supported".format(y_type)) if y_type in ["binary", "multiclass"]: y_true = column_or_1d(y_true) y_pred = column_or_1d(y_pred) if y_type.startswith('multilabel'): y_true = csr_matrix(y_true) y_pred = csr_matrix(y_pred) y_type = 'multilabel-indicator' return y_type, y_true, y_pred def _weighted_sum(sample_score, sample_weight, normalize=False): if normalize: return np.average(sample_score, weights=sample_weight) elif sample_weight is not None: return np.dot(sample_score, sample_weight) else: return sample_score.sum() def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None): """Accuracy classification score. In multilabel classification, this function computes subset accuracy: the set of labels predicted for a sample must *exactly* match the corresponding set of labels in y_true. Read more in the :ref:`User Guide <accuracy_score>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of correctly classified samples. Otherwise, return the fraction of correctly classified samples. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the correctly classified samples (float), else it returns the number of correctly classified samples (int). The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- jaccard_similarity_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equal to the ``jaccard_similarity_score`` function. Examples -------- >>> import numpy as np >>> from sklearn.metrics import accuracy_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> accuracy_score(y_true, y_pred) 0.5 >>> accuracy_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): differing_labels = count_nonzero(y_true - y_pred, axis=1) score = differing_labels == 0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None): """Compute confusion matrix to evaluate the accuracy of a classification By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}` is equal to the number of observations known to be in group :math:`i` but predicted to be in group :math:`j`. Read more in the :ref:`User Guide <confusion_matrix>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to reorder or select a subset of labels. If none is given, those that appear at least once in ``y_true`` or ``y_pred`` are used in sorted order. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- C : array, shape = [n_classes, n_classes] Confusion matrix References ---------- .. 
[1] `Wikipedia entry for the Confusion matrix <https://en.wikipedia.org/wiki/Confusion_matrix>`_ Examples -------- >>> from sklearn.metrics import confusion_matrix >>> y_true = [2, 0, 2, 2, 0, 1] >>> y_pred = [0, 0, 2, 2, 0, 2] >>> confusion_matrix(y_true, y_pred) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) >>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"] >>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"] >>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"]) array([[2, 0, 0], [0, 0, 1], [1, 0, 2]]) """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type not in ("binary", "multiclass"): raise ValueError("%s is not supported" % y_type) if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) if sample_weight is None: sample_weight = np.ones(y_true.shape[0], dtype=np.int) else: sample_weight = np.asarray(sample_weight) check_consistent_length(sample_weight, y_true, y_pred) n_labels = labels.size label_to_ind = dict((y, x) for x, y in enumerate(labels)) # convert yt, yp into index y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred]) y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true]) # intersect y_pred, y_true with labels, eliminate items not in labels ind = np.logical_and(y_pred < n_labels, y_true < n_labels) y_pred = y_pred[ind] y_true = y_true[ind] # also eliminate weights of eliminated items sample_weight = sample_weight[ind] CM = coo_matrix((sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels) ).toarray() return CM def cohen_kappa_score(y1, y2, labels=None): """Cohen's kappa: a statistic that measures inter-annotator agreement. This function computes Cohen's kappa [1], a score that expresses the level of agreement between two annotators on a classification problem. It is defined as .. math:: \kappa = (p_o - p_e) / (1 - p_e) where :math:`p_o` is the empirical probability of agreement on the label assigned to any sample (the observed agreement ratio), and :math:`p_e` is the expected agreement when both annotators assign labels randomly. :math:`p_e` is estimated using a per-annotator empirical prior over the class labels [2]. Parameters ---------- y1 : array, shape = [n_samples] Labels assigned by the first annotator. y2 : array, shape = [n_samples] Labels assigned by the second annotator. The kappa statistic is symmetric, so swapping ``y1`` and ``y2`` doesn't change the value. labels : array, shape = [n_classes], optional List of labels to index the matrix. This may be used to select a subset of labels. If None, all labels that appear at least once in ``y1`` or ``y2`` are used. Returns ------- kappa : float The kappa statistic, which is a number between -1 and 1. The maximum value means complete agreement; zero or lower means chance agreement. References ---------- .. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales". Educational and Psychological Measurement 20(1):37-46. doi:10.1177/001316446002000104. .. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for computational linguistics". Computational Linguistic 34(4):555-596. 
""" confusion = confusion_matrix(y1, y2, labels=labels) P = confusion / float(confusion.sum()) p_observed = np.trace(P) p_expected = np.dot(P.sum(axis=0), P.sum(axis=1)) return (p_observed - p_expected) / (1 - p_expected) def jaccard_similarity_score(y_true, y_pred, normalize=True, sample_weight=None): """Jaccard similarity coefficient score The Jaccard index [1], or Jaccard similarity coefficient, defined as the size of the intersection divided by the size of the union of two label sets, is used to compare set of predicted labels for a sample to the corresponding set of labels in ``y_true``. Read more in the :ref:`User Guide <jaccard_similarity_score>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the sum of the Jaccard similarity coefficient over the sample set. Otherwise, return the average of Jaccard similarity coefficient. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- score : float If ``normalize == True``, return the average Jaccard similarity coefficient, else it returns the sum of the Jaccard similarity coefficient over the sample set. The best performance is 1 with ``normalize == True`` and the number of samples with ``normalize == False``. See also -------- accuracy_score, hamming_loss, zero_one_loss Notes ----- In binary and multiclass classification, this function is equivalent to the ``accuracy_score``. It differs in the multilabel classification problem. References ---------- .. [1] `Wikipedia entry for the Jaccard index <https://en.wikipedia.org/wiki/Jaccard_index>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import jaccard_similarity_score >>> y_pred = [0, 2, 1, 3] >>> y_true = [0, 1, 2, 3] >>> jaccard_similarity_score(y_true, y_pred) 0.5 >>> jaccard_similarity_score(y_true, y_pred, normalize=False) 2 In the multilabel case with binary label indicators: >>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\ np.ones((2, 2))) 0.75 """ # Compute accuracy for each possible representation y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type.startswith('multilabel'): with np.errstate(divide='ignore', invalid='ignore'): # oddly, we may get an "invalid" rather than a "divide" error here pred_or_true = count_nonzero(y_true + y_pred, axis=1) pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1) score = pred_and_true / pred_or_true # If there is no label, it results in a Nan instead, we set # the jaccard to 1: lim_{x->0} x/x = 1 # Note with py2.6 and np 1.3: we can't check safely for nan. score[pred_or_true == 0.0] = 1.0 else: score = y_true == y_pred return _weighted_sum(score, sample_weight, normalize) def matthews_corrcoef(y_true, y_pred, sample_weight=None): """Compute the Matthews correlation coefficient (MCC) for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. 
The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. Read more in the :ref:`User Guide <matthews_corrcoef>`. Parameters ---------- y_true : array, shape = [n_samples] Ground truth (correct) target values. y_pred : array, shape = [n_samples] Estimated targets as returned by a classifier. sample_weight : array-like of shape = [n_samples], default None Sample weights. Returns ------- mcc : float The Matthews correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the accuracy of prediction algorithms for classification: an overview <http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_ .. [2] `Wikipedia entry for the Matthews Correlation Coefficient <https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_ Examples -------- >>> from sklearn.metrics import matthews_corrcoef >>> y_true = [+1, +1, +1, -1] >>> y_pred = [+1, -1, +1, +1] >>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS -0.33... """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if y_type != "binary": raise ValueError("%s is not supported" % y_type) lb = LabelEncoder() lb.fit(np.hstack([y_true, y_pred])) y_true = lb.transform(y_true) y_pred = lb.transform(y_pred) mean_yt = np.average(y_true, weights=sample_weight) mean_yp = np.average(y_pred, weights=sample_weight) y_true_u_cent = y_true - mean_yt y_pred_u_cent = y_pred - mean_yp cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight) var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight) var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight) mcc = cov_ytyp / np.sqrt(var_yt * var_yp) if np.isnan(mcc): return 0. else: return mcc def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None): """Zero-one classification loss. If normalize is ``True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). The best performance is 0. Read more in the :ref:`User Guide <zero_one_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. normalize : bool, optional (default=True) If ``False``, return the number of misclassifications. Otherwise, return the fraction of misclassifications. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, If ``normalize == True``, return the fraction of misclassifications (float), else it returns the number of misclassifications (int). Notes ----- In multilabel classification, the zero_one_loss function corresponds to the subset zero-one loss: for each sample, the entire set of labels must be correctly predicted, otherwise the loss for that sample is equal to one. 
See also -------- accuracy_score, hamming_loss, jaccard_similarity_score Examples -------- >>> from sklearn.metrics import zero_one_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> zero_one_loss(y_true, y_pred) 0.25 >>> zero_one_loss(y_true, y_pred, normalize=False) 1 In the multilabel case with binary label indicators: >>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2))) 0.5 """ score = accuracy_score(y_true, y_pred, normalize=normalize, sample_weight=sample_weight) if normalize: return 1 - score else: if sample_weight is not None: n_samples = np.sum(sample_weight) else: n_samples = _num_samples(y_true) return n_samples - score def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F1 score, also known as balanced F-score or F-measure The F1 score can be interpreted as a weighted average of the precision and recall, where an F1 score reaches its best value at 1 and worst score at 0. The relative contribution of precision and recall to the F1 score are equal. The formula for the F1 score is:: F1 = 2 * (precision * recall) / (precision + recall) In the multi-class and multi-label case, this is the weighted average of the F1 score of each class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. 
This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- f1_score : float or array of float, shape = [n_unique_labels] F1 score of the positive class in binary classification or weighted average of the F1 scores of each class for the multiclass task. References ---------- .. [1] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import f1_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.26... >>> f1_score(y_true, y_pred, average=None) array([ 0.8, 0. , 0. ]) """ return fbeta_score(y_true, y_pred, 1, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight) def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the F-beta score The F-beta score is the weighted harmonic mean of precision and recall, reaching its optimal value at 1 and its worst value at 0. The `beta` parameter determines the weight of precision in the combined score. ``beta < 1`` lends more weight to precision, while ``beta > 1`` favors recall (``beta -> 0`` considers only precision, ``beta -> inf`` only recall). Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta: float Weight of precision in harmonic mean. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. 
``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fbeta_score : float (if average is not None) or array of float, shape =\ [n_unique_labels] F-beta score of the positive class in binary classification or weighted average of the F-beta score of each class for the multiclass task. References ---------- .. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 327-328. .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ Examples -------- >>> from sklearn.metrics import fbeta_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> fbeta_score(y_true, y_pred, average='macro', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average='micro', beta=0.5) ... # doctest: +ELLIPSIS 0.33... >>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5) ... # doctest: +ELLIPSIS 0.23... >>> fbeta_score(y_true, y_pred, average=None, beta=0.5) ... # doctest: +ELLIPSIS array([ 0.71..., 0. , 0. ]) """ _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, beta=beta, labels=labels, pos_label=pos_label, average=average, warn_for=('f-score',), sample_weight=sample_weight) return f def _prf_divide(numerator, denominator, metric, modifier, average, warn_for): """Performs division and handles divide-by-zero. On zero-division, sets the corresponding result elements to zero and raises a warning. The metric, modifier and average arguments are used only for determining an appropriate warning. """ result = numerator / denominator mask = denominator == 0.0 if not np.any(mask): return result # remove infs result[mask] = 0.0 # build appropriate warning # E.g. "Precision and F-score are ill-defined and being set to 0.0 in # labels with no predicted samples" axis0 = 'sample' axis1 = 'label' if average == 'samples': axis0, axis1 = axis1, axis0 if metric in warn_for and 'f-score' in warn_for: msg_start = '{0} and F-score are'.format(metric.title()) elif metric in warn_for: msg_start = '{0} is'.format(metric.title()) elif 'f-score' in warn_for: msg_start = 'F-score is' else: return result msg = ('{0} ill-defined and being set to 0.0 {{0}} ' 'no {1} {2}s.'.format(msg_start, modifier, axis0)) if len(mask) == 1: msg = msg.format('due to') else: msg = msg.format('in {0}s with'.format(axis1)) warnings.warn(msg, UndefinedMetricWarning, stacklevel=2) return result def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None, pos_label=1, average=None, warn_for=('precision', 'recall', 'f-score'), sample_weight=None): """Compute precision, recall, F-measure and support for each class The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. 
The F-beta score can be interpreted as a weighted harmonic mean of the precision and recall, where an F-beta score reaches its best value at 1 and worst score at 0. The F-beta score weights recall more than precision by a factor of ``beta``. ``beta == 1.0`` means recall and precision are equally important. The support is the number of occurrences of each class in ``y_true``. If ``pos_label is None`` and in binary classification, this function returns the average precision, recall and F-measure if ``average`` is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. beta : float, 1.0 by default The strength of recall versus precision in the F-score. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \ 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. warn_for : tuple or set, for internal use This determines which warnings will be made in the case that this function is being used to return only one of its metrics. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision: float (if average is not None) or array of float, shape =\ [n_unique_labels] recall: float (if average is not None) or array of float, , shape =\ [n_unique_labels] fbeta_score: float (if average is not None) or array of float, shape =\ [n_unique_labels] support: int (if average is not None) or array of int, shape =\ [n_unique_labels] The number of occurrences of each label in ``y_true``. References ---------- .. 
[1] `Wikipedia entry for the Precision and recall <https://en.wikipedia.org/wiki/Precision_and_recall>`_ .. [2] `Wikipedia entry for the F1-score <https://en.wikipedia.org/wiki/F1_score>`_ .. [3] `Discriminative Methods for Multi-labeled Classification Advances in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu Godbole, Sunita Sarawagi <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>` Examples -------- >>> from sklearn.metrics import precision_recall_fscore_support >>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig']) >>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog']) >>> precision_recall_fscore_support(y_true, y_pred, average='macro') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='micro') ... # doctest: +ELLIPSIS (0.33..., 0.33..., 0.33..., None) >>> precision_recall_fscore_support(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS (0.22..., 0.33..., 0.26..., None) It is possible to compute per-label precisions, recalls, F1-scores and supports instead of averaging: >>> precision_recall_fscore_support(y_true, y_pred, average=None, ... labels=['pig', 'dog', 'cat']) ... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE (array([ 0. , 0. , 0.66...]), array([ 0., 0., 1.]), array([ 0. , 0. , 0.8]), array([2, 2, 2])) """ average_options = (None, 'micro', 'macro', 'weighted', 'samples') if average not in average_options and average != 'binary': raise ValueError('average has to be one of ' + str(average_options)) if beta <= 0: raise ValueError("beta should be >0 in the F-beta score") y_type, y_true, y_pred = _check_targets(y_true, y_pred) present_labels = unique_labels(y_true, y_pred) if average == 'binary' and (y_type != 'binary' or pos_label is None): warnings.warn('The default `weighted` averaging is deprecated, ' 'and from version 0.18, use of precision, recall or ' 'F-score with multiclass or multilabel data or ' 'pos_label=None will result in an exception. ' 'Please set an explicit value for `average`, one of ' '%s. In cross validation use, for instance, ' 'scoring="f1_weighted" instead of scoring="f1".' % str(average_options), DeprecationWarning, stacklevel=2) average = 'weighted' if y_type == 'binary' and pos_label is not None and average is not None: if average != 'binary': warnings.warn('From version 0.18, binary input will not be ' 'handled specially when using averaged ' 'precision/recall/F-score. ' 'Please use average=\'binary\' to report only the ' 'positive class performance.', DeprecationWarning) if labels is None or len(labels) <= 2: if pos_label not in present_labels: if len(present_labels) < 2: # Only negative labels return (0., 0., 0., 0) else: raise ValueError("pos_label=%r is not a valid label: %r" % (pos_label, present_labels)) labels = [pos_label] if labels is None: labels = present_labels n_labels = None else: n_labels = len(labels) labels = np.hstack([labels, np.setdiff1d(present_labels, labels, assume_unique=True)]) # Calculate tp_sum, pred_sum, true_sum ### if y_type.startswith('multilabel'): sum_axis = 1 if average == 'samples' else 0 # All labels are index integers for multilabel. # Select labels: if not np.all(labels == present_labels): if np.max(labels) > np.max(present_labels): raise ValueError('All labels must be in [0, n labels). ' 'Got %d > %d' % (np.max(labels), np.max(present_labels))) if np.min(labels) < 0: raise ValueError('All labels must be in [0, n labels). 
' 'Got %d < 0' % np.min(labels)) y_true = y_true[:, labels[:n_labels]] y_pred = y_pred[:, labels[:n_labels]] # calculate weighted counts true_and_pred = y_true.multiply(y_pred) tp_sum = count_nonzero(true_and_pred, axis=sum_axis, sample_weight=sample_weight) pred_sum = count_nonzero(y_pred, axis=sum_axis, sample_weight=sample_weight) true_sum = count_nonzero(y_true, axis=sum_axis, sample_weight=sample_weight) elif average == 'samples': raise ValueError("Sample-based precision, recall, fscore is " "not meaningful outside multilabel " "classification. See the accuracy_score instead.") else: le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) y_pred = le.transform(y_pred) sorted_labels = le.classes_ # labels are now from 0 to len(labels) - 1 -> use bincount tp = y_true == y_pred tp_bins = y_true[tp] if sample_weight is not None: tp_bins_weights = np.asarray(sample_weight)[tp] else: tp_bins_weights = None if len(tp_bins): tp_sum = bincount(tp_bins, weights=tp_bins_weights, minlength=len(labels)) else: # Pathological case true_sum = pred_sum = tp_sum = np.zeros(len(labels)) if len(y_pred): pred_sum = bincount(y_pred, weights=sample_weight, minlength=len(labels)) if len(y_true): true_sum = bincount(y_true, weights=sample_weight, minlength=len(labels)) # Retain only selected labels indices = np.searchsorted(sorted_labels, labels[:n_labels]) tp_sum = tp_sum[indices] true_sum = true_sum[indices] pred_sum = pred_sum[indices] if average == 'micro': tp_sum = np.array([tp_sum.sum()]) pred_sum = np.array([pred_sum.sum()]) true_sum = np.array([true_sum.sum()]) # Finally, we have all our sufficient statistics. Divide! # beta2 = beta ** 2 with np.errstate(divide='ignore', invalid='ignore'): # Divide, and on zero-division, set scores to 0 and warn: # Oddly, we may get an "invalid" rather than a "divide" error # here. precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted', average, warn_for) recall = _prf_divide(tp_sum, true_sum, 'recall', 'true', average, warn_for) # Don't need to warn for F: either P or R warned, or tp == 0 where pos # and true are nonzero, in which case, F is well-defined and zero f_score = ((1 + beta2) * precision * recall / (beta2 * precision + recall)) f_score[tp_sum == 0] = 0.0 # Average the results if average == 'weighted': weights = true_sum if weights.sum() == 0: return 0, 0, 0, None elif average == 'samples': weights = sample_weight else: weights = None if average is not None: assert average != 'binary' or len(precision) == 1 precision = np.average(precision, weights=weights) recall = np.average(recall, weights=weights) f_score = np.average(f_score, weights=weights) true_sum = None # return no support return precision, recall, f_score, true_sum def precision_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the precision The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. 
labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : float (if average is not None) or array of float, shape =\ [n_unique_labels] Precision of the positive class in binary classification or weighted average of the precision of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import precision_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> precision_score(y_true, y_pred, average='weighted') ... # doctest: +ELLIPSIS 0.22... >>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS array([ 0.66..., 0. , 0. ]) """ p, _, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('precision',), sample_weight=sample_weight) return p def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary', sample_weight=None): """Compute the recall The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The best value is 1 and the worst value is 0. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. 
Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : list, optional The set of labels to include when ``average != 'binary'``, and their order if ``average is None``. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in ``y_true`` and ``y_pred`` are used in sorted order. .. versionchanged:: 0.17 parameter *labels* improved for multiclass problem. pos_label : str or int, 1 by default The class to report if ``average='binary'``. Until version 0.18 it is necessary to set ``pos_label=None`` if seeking to use another averaging method over binary targets. average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \ 'weighted'] This parameter is required for multiclass/multilabel targets. If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'binary'``: Only report results for the class specified by ``pos_label``. This is applicable only if targets (``y_{true,pred}``) are binary. ``'micro'``: Calculate metrics globally by counting the total true positives, false negatives and false positives. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance; it can result in an F-score that is not between precision and recall. ``'samples'``: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification where this differs from :func:`accuracy_score`). Note that if ``pos_label`` is given in binary classification with `average != 'binary'`, only that positive class is reported. This behavior is deprecated and will change in version 0.18. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- recall : float (if average is not None) or array of float, shape =\ [n_unique_labels] Recall of the positive class in binary classification or weighted average of the recall of each class for the multiclass task. Examples -------- >>> from sklearn.metrics import recall_score >>> y_true = [0, 1, 2, 0, 1, 2] >>> y_pred = [0, 2, 1, 0, 0, 1] >>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS 0.33... >>> recall_score(y_true, y_pred, average=None) array([ 1., 0., 0.]) """ _, r, _, _ = precision_recall_fscore_support(y_true, y_pred, labels=labels, pos_label=pos_label, average=average, warn_for=('recall',), sample_weight=sample_weight) return r def classification_report(y_true, y_pred, labels=None, target_names=None, sample_weight=None, digits=2): """Build a text report showing the main classification metrics Read more in the :ref:`User Guide <classification_report>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) target values. 
y_pred : 1d array-like, or label indicator array / sparse matrix Estimated targets as returned by a classifier. labels : array, shape = [n_labels] Optional list of label indices to include in the report. target_names : list of strings Optional display names matching the labels (same order). sample_weight : array-like of shape = [n_samples], optional Sample weights. digits : int Number of digits for formatting output floating point values Returns ------- report : string Text summary of the precision, recall, F1 score for each class. Examples -------- >>> from sklearn.metrics import classification_report >>> y_true = [0, 1, 2, 2, 2] >>> y_pred = [0, 0, 2, 2, 1] >>> target_names = ['class 0', 'class 1', 'class 2'] >>> print(classification_report(y_true, y_pred, target_names=target_names)) precision recall f1-score support <BLANKLINE> class 0 0.50 1.00 0.67 1 class 1 0.00 0.00 0.00 1 class 2 1.00 0.67 0.80 3 <BLANKLINE> avg / total 0.70 0.60 0.61 5 <BLANKLINE> """ if labels is None: labels = unique_labels(y_true, y_pred) else: labels = np.asarray(labels) last_line_heading = 'avg / total' if target_names is None: target_names = ['%s' % l for l in labels] name_width = max(len(cn) for cn in target_names) width = max(name_width, len(last_line_heading), digits) headers = ["precision", "recall", "f1-score", "support"] fmt = '%% %ds' % width # first column: class name fmt += ' ' fmt += ' '.join(['% 9s' for _ in headers]) fmt += '\n' headers = [""] + headers report = fmt % tuple(headers) report += '\n' p, r, f1, s = precision_recall_fscore_support(y_true, y_pred, labels=labels, average=None, sample_weight=sample_weight) for i, label in enumerate(labels): values = [target_names[i]] for v in (p[i], r[i], f1[i]): values += ["{0:0.{1}f}".format(v, digits)] values += ["{0}".format(s[i])] report += fmt % tuple(values) report += '\n' # compute averages values = [last_line_heading] for v in (np.average(p, weights=s), np.average(r, weights=s), np.average(f1, weights=s)): values += ["{0:0.{1}f}".format(v, digits)] values += ['{0}'.format(np.sum(s))] report += fmt % tuple(values) return report def hamming_loss(y_true, y_pred, classes=None, sample_weight=None): """Compute the average Hamming loss. The Hamming loss is the fraction of labels that are incorrectly predicted. Read more in the :ref:`User Guide <hamming_loss>`. Parameters ---------- y_true : 1d array-like, or label indicator array / sparse matrix Ground truth (correct) labels. y_pred : 1d array-like, or label indicator array / sparse matrix Predicted labels, as returned by a classifier. classes : array, shape = [n_labels], optional Integer array of labels. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float or int, Return the average Hamming loss between element of ``y_true`` and ``y_pred``. See Also -------- accuracy_score, jaccard_similarity_score, zero_one_loss Notes ----- In multiclass classification, the Hamming loss correspond to the Hamming distance between ``y_true`` and ``y_pred`` which is equivalent to the subset ``zero_one_loss`` function. In multilabel classification, the Hamming loss is different from the subset zero-one loss. The zero-one loss considers the entire set of labels for a given sample incorrect if it does entirely match the true set of labels. Hamming loss is more forgiving in that it penalizes the individual labels. The Hamming loss is upperbounded by the subset zero-one loss. When normalized over samples, the Hamming loss is always between 0 and 1. References ---------- .. 
[1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification: An Overview. International Journal of Data Warehousing & Mining, 3(3), 1-13, July-September 2007. .. [2] `Wikipedia entry on the Hamming distance <https://en.wikipedia.org/wiki/Hamming_distance>`_ Examples -------- >>> from sklearn.metrics import hamming_loss >>> y_pred = [1, 2, 3, 4] >>> y_true = [2, 2, 3, 4] >>> hamming_loss(y_true, y_pred) 0.25 In the multilabel case with binary label indicators: >>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2))) 0.75 """ y_type, y_true, y_pred = _check_targets(y_true, y_pred) if classes is None: classes = unique_labels(y_true, y_pred) else: classes = np.asarray(classes) if sample_weight is None: weight_average = 1. else: weight_average = np.mean(sample_weight) if y_type.startswith('multilabel'): n_differences = count_nonzero(y_true - y_pred, sample_weight=sample_weight) return (n_differences / (y_true.shape[0] * len(classes) * weight_average)) elif y_type in ["binary", "multiclass"]: return _weighted_sum(y_true != y_pred, sample_weight, normalize=True) else: raise ValueError("{0} is not supported".format(y_type)) def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None): """Log loss, aka logistic loss or cross-entropy loss. This is the loss function used in (multinomial) logistic regression and extensions of it such as neural networks, defined as the negative log-likelihood of the true labels given a probabilistic classifier's predictions. For a single sample with true label yt in {0,1} and estimated probability yp that yt = 1, the log loss is -log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp)) Read more in the :ref:`User Guide <log_loss>`. Parameters ---------- y_true : array-like or label indicator matrix Ground truth (correct) labels for n_samples samples. y_pred : array-like of float, shape = (n_samples, n_classes) Predicted probabilities, as returned by a classifier's predict_proba method. eps : float Log loss is undefined for p=0 or p=1, so probabilities are clipped to max(eps, min(1 - eps, p)). normalize : bool, optional (default=True) If true, return the mean loss per sample. Otherwise, return the sum of the per-sample losses. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float Examples -------- >>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS ... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]]) 0.21616... References ---------- C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer, p. 209. Notes ----- The logarithm used is the natural logarithm (base-e). """ lb = LabelBinarizer() T = lb.fit_transform(y_true) if T.shape[1] == 1: T = np.append(1 - T, T, axis=1) y_pred = check_array(y_pred, ensure_2d=False) # Clipping Y = np.clip(y_pred, eps, 1 - eps) # This happens in cases when elements in y_pred have type "str". if not isinstance(Y, np.ndarray): raise ValueError("y_pred should be an array of floats.") # If y_pred is of single dimension, assume y_true to be binary # and then check. if Y.ndim == 1: Y = Y[:, np.newaxis] if Y.shape[1] == 1: Y = np.append(1 - Y, Y, axis=1) # Check if dimensions are consistent. 
check_consistent_length(T, Y) T = check_array(T) Y = check_array(Y) if T.shape[1] != Y.shape[1]: raise ValueError("y_true and y_pred have different number of classes " "%d, %d" % (T.shape[1], Y.shape[1])) # Renormalize Y /= Y.sum(axis=1)[:, np.newaxis] loss = -(T * np.log(Y)).sum(axis=1) return _weighted_sum(loss, sample_weight, normalize) def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None): """Average hinge loss (non-regularized) In binary class case, assuming labels in y_true are encoded with +1 and -1, when a prediction mistake is made, ``margin = y_true * pred_decision`` is always negative (since the signs disagree), implying ``1 - margin`` is always greater than 1. The cumulated hinge loss is therefore an upper bound of the number of mistakes made by the classifier. In multiclass case, the function expects that either all the labels are included in y_true or an optional labels argument is provided which contains all the labels. The multilabel margin is calculated according to Crammer-Singer's method. As in the binary case, the cumulated hinge loss is an upper bound of the number of mistakes made by the classifier. Read more in the :ref:`User Guide <hinge_loss>`. Parameters ---------- y_true : array, shape = [n_samples] True target, consisting of integers of two values. The positive label must be greater than the negative label. pred_decision : array, shape = [n_samples] or [n_samples, n_classes] Predicted decisions, as output by decision_function (floats). labels : array, optional, default None Contains all the labels for the problem. Used in multiclass hinge loss. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] `Wikipedia entry on the Hinge loss <https://en.wikipedia.org/wiki/Hinge_loss>`_ .. [2] Koby Crammer, Yoram Singer. On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines. Journal of Machine Learning Research 2, (2001), 265-292 .. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models by Robert C. Moore, John DeNero. <http://www.ttic.edu/sigml/symposium2011/papers/ Moore+DeNero_Regularization.pdf>`_ Examples -------- >>> from sklearn import svm >>> from sklearn.metrics import hinge_loss >>> X = [[0], [1]] >>> y = [-1, 1] >>> est = svm.LinearSVC(random_state=0) >>> est.fit(X, y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=0, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-2], [3], [0.5]]) >>> pred_decision # doctest: +ELLIPSIS array([-2.18..., 2.36..., 0.09...]) >>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS 0.30... In the multiclass case: >>> X = np.array([[0], [1], [2], [3]]) >>> Y = np.array([0, 1, 2, 3]) >>> labels = np.array([0, 1, 2, 3]) >>> est = svm.LinearSVC() >>> est.fit(X, Y) LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True, intercept_scaling=1, loss='squared_hinge', max_iter=1000, multi_class='ovr', penalty='l2', random_state=None, tol=0.0001, verbose=0) >>> pred_decision = est.decision_function([[-1], [2], [3]]) >>> y_true = [0, 2, 3] >>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS 0.56... 
""" check_consistent_length(y_true, pred_decision, sample_weight) pred_decision = check_array(pred_decision, ensure_2d=False) y_true = column_or_1d(y_true) y_true_unique = np.unique(y_true) if y_true_unique.size > 2: if (labels is None and pred_decision.ndim > 1 and (np.size(y_true_unique) != pred_decision.shape[1])): raise ValueError("Please include all labels in y_true " "or pass labels as third argument") if labels is None: labels = y_true_unique le = LabelEncoder() le.fit(labels) y_true = le.transform(y_true) mask = np.ones_like(pred_decision, dtype=bool) mask[np.arange(y_true.shape[0]), y_true] = False margin = pred_decision[~mask] margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1), axis=1) else: # Handles binary class case # this code assumes that positive and negative labels # are encoded as +1 and -1 respectively pred_decision = column_or_1d(pred_decision) pred_decision = np.ravel(pred_decision) lbin = LabelBinarizer(neg_label=-1) y_true = lbin.fit_transform(y_true)[:, 0] try: margin = y_true * pred_decision except TypeError: raise TypeError("pred_decision should be an array of floats.") losses = 1 - margin # The hinge_loss doesn't penalize good enough predictions. losses[losses <= 0] = 0 return np.average(losses, weights=sample_weight) def _check_binary_probabilistic_predictions(y_true, y_prob): """Check that y_true is binary and y_prob contains valid probabilities""" check_consistent_length(y_true, y_prob) labels = np.unique(y_true) if len(labels) != 2: raise ValueError("Only binary classification is supported. " "Provided labels %s." % labels) if y_prob.max() > 1: raise ValueError("y_prob contains values greater than 1.") if y_prob.min() < 0: raise ValueError("y_prob contains values less than 0.") return label_binarize(y_true, labels)[:, 0] def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None): """Compute the Brier score. The smaller the Brier score, the better, hence the naming with "loss". Across all items in a set N predictions, the Brier score measures the mean squared difference between (1) the predicted probability assigned to the possible outcomes for item i, and (2) the actual outcome. Therefore, the lower the Brier score is for a set of predictions, the better the predictions are calibrated. Note that the Brier score always takes on a value between zero and one, since this is the largest possible difference between a predicted probability (which must be between zero and one) and the actual outcome (which can take on values of only 0 and 1). The Brier score is appropriate for binary and categorical outcomes that can be structured as true or false, but is inappropriate for ordinal variables which can take on three or more values (this is because the Brier score assumes that all possible outcomes are equivalently "distant" from one another). Which label is considered to be the positive label is controlled via the parameter pos_label, which defaults to 1. Read more in the :ref:`User Guide <calibration>`. Parameters ---------- y_true : array, shape (n_samples,) True targets. y_prob : array, shape (n_samples,) Probabilities of the positive class. sample_weight : array-like of shape = [n_samples], optional Sample weights. pos_label : int (default: None) Label of the positive class. 
If None, the maximum label is used as positive class Returns ------- score : float Brier score Examples -------- >>> import numpy as np >>> from sklearn.metrics import brier_score_loss >>> y_true = np.array([0, 1, 1, 0]) >>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"]) >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3]) >>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true_categorical, y_prob, \ pos_label="ham") # doctest: +ELLIPSIS 0.037... >>> brier_score_loss(y_true, np.array(y_prob) > 0.5) 0.0 References ---------- https://en.wikipedia.org/wiki/Brier_score """ y_true = column_or_1d(y_true) y_prob = column_or_1d(y_prob) if pos_label is None: pos_label = y_true.max() y_true = np.array(y_true == pos_label, int) y_true = _check_binary_probabilistic_predictions(y_true, y_prob) return np.average((y_true - y_prob) ** 2, weights=sample_weight)
bsd-3-clause
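A brief, hedged illustration of the precision/recall/F-beta relationship documented in the record above; the toy labels below are invented for this sketch, and only public scikit-learn functions are used.

import numpy as np
from sklearn.metrics import fbeta_score, precision_score, recall_score

# Toy binary labels, chosen only to make the counts easy to read:
# tp = 3, fp = 0, fn = 1.
y_true = [0, 1, 1, 0, 1, 1]
y_pred = [0, 1, 0, 0, 1, 1]
beta = 0.5

p = precision_score(y_true, y_pred)  # tp / (tp + fp) = 1.0
r = recall_score(y_true, y_pred)     # tp / (tp + fn) = 0.75

# The weighted harmonic mean from the fbeta_score docstring above.
manual = (1 + beta ** 2) * p * r / (beta ** 2 * p + r)
assert np.isclose(manual, fbeta_score(y_true, y_pred, beta=beta))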
DailyActie/Surrogate-Model
01-codes/scikit-learn-master/examples/decomposition/plot_sparse_coding.py
1
4054
""" =========================================== Sparse coding with a precomputed dictionary =========================================== Transform a signal as a sparse combination of Ricker wavelets. This example visually compares different sparse coding methods using the :class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known as Mexican hat or the second derivative of a Gaussian) is not a particularly good kernel to represent piecewise constant signals like this one. It can therefore be seen how much adding different widths of atoms matters and it therefore motivates learning the dictionary to best fit your type of signals. The richer dictionary on the right is not larger in size, heavier subsampling is performed in order to stay on the same order of magnitude. """ print(__doc__) import matplotlib.pylab as plt import numpy as np from sklearn.decomposition import SparseCoder def ricker_function(resolution, center, width): """Discrete sub-sampled Ricker (Mexican hat) wavelet""" x = np.linspace(0, resolution - 1, resolution) x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4))) * (1 - ((x - center) ** 2 / width ** 2)) * np.exp((-(x - center) ** 2) / (2 * width ** 2))) return x def ricker_matrix(width, resolution, n_components): """Dictionary of Ricker (Mexican hat) wavelets""" centers = np.linspace(0, resolution - 1, n_components) D = np.empty((n_components, resolution)) for i, center in enumerate(centers): D[i] = ricker_function(resolution, center, width) D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis] return D resolution = 1024 subsampling = 3 # subsampling factor width = 100 n_components = resolution / subsampling # Compute a wavelet dictionary D_fixed = ricker_matrix(width=width, resolution=resolution, n_components=n_components) D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution, n_components=np.floor(n_components / 5)) for w in (10, 50, 100, 500, 1000))] # Generate a signal y = np.linspace(0, resolution - 1, resolution) first_quarter = y < resolution / 4 y[first_quarter] = 3. y[np.logical_not(first_quarter)] = -1. 
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs,
#  color)
estimators = [('OMP', 'omp', None, 15, 'navy'),
              ('Lasso', 'lasso_cd', 2, None, 'turquoise'), ]
lw = 2
plt.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
                                         ('fixed width', 'multiple widths'))):
    plt.subplot(1, 2, subplot + 1)
    plt.title('Sparse coding against %s dictionary' % title)
    plt.plot(y, lw=lw, linestyle='--', label='Original signal')
    # Do a wavelet approximation
    for title, algo, alpha, n_nonzero, color in estimators:
        coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
                            transform_alpha=alpha, transform_algorithm=algo)
        x = coder.transform(y.reshape(1, -1))
        density = len(np.flatnonzero(x))
        x = np.ravel(np.dot(x, D))
        squared_error = np.sum((y - x) ** 2)
        plt.plot(x, color=color, lw=lw,
                 label='%s: %s nonzero coefs,\n%.2f error'
                       % (title, density, squared_error))

    # Soft thresholding debiasing
    coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
                        transform_alpha=20)
    x = coder.transform(y.reshape(1, -1))
    _, idx = np.where(x != 0)
    x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
    x = np.ravel(np.dot(x, D))
    squared_error = np.sum((y - x) ** 2)
    plt.plot(x, color='darkorange', lw=lw,
             label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error'
                   % (len(idx), squared_error))
plt.axis('tight')
plt.legend(shadow=False, loc='best')
plt.subplots_adjust(.04, .07, .97, .90, .09, .2)
plt.show()
mit
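A hedged sketch of the SparseCoder API the example above builds on: with an orthonormal dictionary (an assumption made here so that recovery is guaranteed to be exact), OMP finds a known two-atom combination. The dictionary and signal are invented for illustration.

import numpy as np
from sklearn.decomposition import SparseCoder

rng = np.random.RandomState(0)
Q, _ = np.linalg.qr(rng.randn(32, 8))  # 8 orthonormal columns
D = Q.T                                # 8 orthonormal atoms of length 32

signal = 3.0 * D[2] - 1.5 * D[5]       # an exact two-atom combination

coder = SparseCoder(dictionary=D, transform_algorithm='omp',
                    transform_n_nonzero_coefs=2)
code = coder.transform(signal.reshape(1, -1))

# With orthonormal atoms, OMP recovers the support and weights exactly.
assert set(np.flatnonzero(code)) == {2, 5}
assert np.allclose(np.dot(code, D), signal)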
ChrisBeaumont/brut
bubbly/hyperopt.py
2
2563
""" A simple interface for random exploration of hyperparameter space """ import random import numpy as np from scipy import stats from sklearn.metrics import auc from sklearn import metrics as met class Choice(object): """Randomly select from a list""" def __init__(self, *choices): self._choices = choices def rvs(self): return random.choice(self._choices) class Space(object): """ Spaces gather and randomly sample collections of hyperparameters Any class with an rvs method is a valid hyperparameter (e.g., anything in scipy.stats is a hyperparameter) """ def __init__(self, **hyperparams): self._hyperparams = hyperparams def __iter__(self): while True: yield {k: v.rvs() for k, v in self._hyperparams.items()} def auc_below_fpos(y, yp, fpos): """ Variant on the area under the ROC curve score Only integrate the portion of the curve to the left of a threshold in fpos """ fp, tp, th = met.roc_curve(y, yp) good = (fp <= fpos) return auc(fp[good], tp[good]) def fmin(objective, space, threshold=np.inf): """ Generator that randomly samples a space, and yields whenever a new minimum is encountered Parameters ---------- objective : A function which takes hyperparameters as input, and computes an objective function and classifier out output space : the Space to sample threshold : A threshold in the objective function values. If provided, will not yield anything until the objective function falls below threshold Yields ------ Tuples of (objective function, parameter dict, classifier) """ best = threshold try: for p in space: f, clf = objective(**p) if f < best: best = f yield best, p, clf except KeyboardInterrupt: pass #default space for Gradient Boosted Decision trees gb_space = Space(learning_rate = stats.uniform(1e-3, 1 - 1.01e-3), n_estimators = Choice(50, 100, 200), max_depth = Choice(1, 2, 3), subsample = stats.uniform(1e-3, 1 - 1.01e-3)) #default space for WiseRF random forests rf_space = Space(n_estimators = Choice(200, 400, 800, 1600), min_samples_split = Choice(1, 2, 4), criterion = Choice('gini', 'gainratio', 'infogain'), max_features = Choice('auto'), n_jobs = Choice(2))
mit
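A hedged usage sketch for the Space/Choice/fmin helpers in the record above. The toy quadratic objective is invented here, and the `bubbly.hyperopt` import path is an assumption taken from this record's path field; since fmin expects (score, classifier) pairs, the sampled parameters stand in for a fitted classifier.

from scipy import stats
from bubbly.hyperopt import Choice, Space, fmin  # assumed import path

space = Space(x=stats.uniform(-1, 2),  # x ~ U[-1, 1]
              depth=Choice(1, 2, 3))

def objective(x, depth):
    # Toy stand-in: fmin expects (objective value, classifier); the
    # sampled parameters are returned in place of a fitted model.
    return (x - 0.5) ** 2 + 0.1 * depth, (x, depth)

# fmin is an infinite generator that yields each new minimum; take three.
for i, (best, params, _) in enumerate(fmin(objective, space)):
    print(best, params)
    if i == 2:
        break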
zuku1985/scikit-learn
sklearn/utils/tests/test_multiclass.py
58
14316
from __future__ import division import numpy as np import scipy.sparse as sp from itertools import product from sklearn.externals.six.moves import xrange from sklearn.externals.six import iteritems from scipy.sparse import issparse from scipy.sparse import csc_matrix from scipy.sparse import csr_matrix from scipy.sparse import coo_matrix from scipy.sparse import dok_matrix from scipy.sparse import lil_matrix from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.multiclass import unique_labels from sklearn.utils.multiclass import is_multilabel from sklearn.utils.multiclass import type_of_target from sklearn.utils.multiclass import class_distribution from sklearn.utils.multiclass import check_classification_targets from sklearn.utils.metaestimators import _safe_split from sklearn.model_selection import ShuffleSplit from sklearn.svm import SVC from sklearn import datasets class NotAnArray(object): """An object that is convertable to an array. This is useful to simulate a Pandas timeseries.""" def __init__(self, data): self.data = data def __array__(self, dtype=None): return self.data EXAMPLES = { 'multilabel-indicator': [ # valid when the data is formatted as sparse or dense, identified # by CSR format when the testing takes place csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))), csr_matrix(np.array([[0, 1], [1, 0]])), csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)), csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)), csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)), csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)), csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)), csr_matrix(np.array([[0, 0], [0, 0]])), csr_matrix(np.array([[0, 1]])), # Only valid when data is dense np.array([[-1, 1], [1, -1]]), np.array([[-3, 3], [3, -3]]), NotAnArray(np.array([[-3, 3], [3, -3]])), ], 'multiclass': [ [1, 0, 2, 2, 1, 4, 2, 4, 4, 4], np.array([1, 0, 2]), np.array([1, 0, 2], dtype=np.int8), np.array([1, 0, 2], dtype=np.uint8), np.array([1, 0, 2], dtype=np.float), np.array([1, 0, 2], dtype=np.float32), np.array([[1], [0], [2]]), NotAnArray(np.array([1, 0, 2])), [0, 1, 2], ['a', 'b', 'c'], np.array([u'a', u'b', u'c']), np.array([u'a', u'b', u'c'], dtype=object), np.array(['a', 'b', 'c'], dtype=object), ], 'multiclass-multioutput': [ np.array([[1, 0, 2, 2], [1, 4, 2, 4]]), np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8), np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8), np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float), np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32), np.array([['a', 'b'], ['c', 'd']]), np.array([[u'a', u'b'], [u'c', u'd']]), np.array([[u'a', u'b'], [u'c', u'd']], dtype=object), np.array([[1, 0, 2]]), NotAnArray(np.array([[1, 0, 2]])), ], 'binary': [ [0, 1], [1, 1], [], [0], np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]), np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool), np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8), np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8), np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float), np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32), np.array([[0], [1]]), NotAnArray(np.array([[0], [1]])), [1, -1], [3, 5], ['a'], ['a', 'b'], ['abc', 'def'], 
np.array(['abc', 'def']), [u'a', u'b'], np.array(['abc', 'def'], dtype=object), ], 'continuous': [ [1e-5], [0, .5], np.array([[0], [.5]]), np.array([[0], [.5]], dtype=np.float32), ], 'continuous-multioutput': [ np.array([[0, .5], [.5, 0]]), np.array([[0, .5], [.5, 0]], dtype=np.float32), np.array([[0, .5]]), ], 'unknown': [ [[]], [()], # sequence of sequences that weren't supported even before deprecation np.array([np.array([]), np.array([1, 2, 3])], dtype=object), [np.array([]), np.array([1, 2, 3])], [set([1, 2, 3]), set([1, 2])], [frozenset([1, 2, 3]), frozenset([1, 2])], # and also confusable as sequences of sequences [{0: 'a', 1: 'b'}, {0: 'a'}], # empty second dimension np.array([[], []]), # 3d np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]), ] } NON_ARRAY_LIKE_EXAMPLES = [ set([1, 2, 3]), {0: 'a', 1: 'b'}, {0: [5], 1: [5]}, 'abc', frozenset([1, 2, 3]), None, ] MULTILABEL_SEQUENCES = [ [[1], [2], [0, 1]], [(), (2), (0, 1)], np.array([[], [1, 2]], dtype='object'), NotAnArray(np.array([[], [1, 2]], dtype='object')) ] def test_unique_labels(): # Empty iterable assert_raises(ValueError, unique_labels) # Multiclass problem assert_array_equal(unique_labels(xrange(10)), np.arange(10)) assert_array_equal(unique_labels(np.arange(10)), np.arange(10)) assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4])) # Multilabel indicator assert_array_equal(unique_labels(np.array([[0, 0, 1], [1, 0, 1], [0, 0, 0]])), np.arange(3)) assert_array_equal(unique_labels(np.array([[0, 0, 1], [0, 0, 0]])), np.arange(3)) # Several arrays passed assert_array_equal(unique_labels([4, 0, 2], xrange(5)), np.arange(5)) assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)), np.arange(3)) # Border line case with binary indicator matrix assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5))) assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5))) assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))), np.arange(5)) def test_unique_labels_non_specific(): # Test unique_labels with a variety of collected examples # Smoke test for all supported format for format in ["binary", "multiclass", "multilabel-indicator"]: for y in EXAMPLES[format]: unique_labels(y) # We don't support those format at the moment for example in NON_ARRAY_LIKE_EXAMPLES: assert_raises(ValueError, unique_labels, example) for y_type in ["unknown", "continuous", 'continuous-multioutput', 'multiclass-multioutput']: for example in EXAMPLES[y_type]: assert_raises(ValueError, unique_labels, example) def test_unique_labels_mixed_types(): # Mix with binary or multiclass and multilabel mix_clf_format = product(EXAMPLES["multilabel-indicator"], EXAMPLES["multiclass"] + EXAMPLES["binary"]) for y_multilabel, y_multiclass in mix_clf_format: assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel) assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass) assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]]) assert_raises(ValueError, unique_labels, ["1", 2]) assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]]) assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]]) def test_is_multilabel(): for group, group_examples in iteritems(EXAMPLES): if group in ['multilabel-indicator']: dense_assert_, dense_exp = assert_true, 'True' else: dense_assert_, dense_exp = assert_false, 'False' for example in group_examples: # Only mark explicitly defined sparse examples as valid sparse # multilabel-indicators if group == 'multilabel-indicator' and issparse(example): sparse_assert_, 
sparse_exp = assert_true, 'True' else: sparse_assert_, sparse_exp = assert_false, 'False' if (issparse(example) or (hasattr(example, '__array__') and np.asarray(example).ndim == 2 and np.asarray(example).dtype.kind in 'biuf' and np.asarray(example).shape[1] > 0)): examples_sparse = [sparse_matrix(example) for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix]] for exmpl_sparse in examples_sparse: sparse_assert_(is_multilabel(exmpl_sparse), msg=('is_multilabel(%r)' ' should be %s') % (exmpl_sparse, sparse_exp)) # Densify sparse examples before testing if issparse(example): example = example.toarray() dense_assert_(is_multilabel(example), msg='is_multilabel(%r) should be %s' % (example, dense_exp)) def test_check_classification_targets(): for y_type in EXAMPLES.keys(): if y_type in ["unknown", "continuous", 'continuous-multioutput']: for example in EXAMPLES[y_type]: msg = 'Unknown label type: ' assert_raises_regex(ValueError, msg, check_classification_targets, example) else: for example in EXAMPLES[y_type]: check_classification_targets(example) # @ignore_warnings def test_type_of_target(): for group, group_examples in iteritems(EXAMPLES): for example in group_examples: assert_equal(type_of_target(example), group, msg=('type_of_target(%r) should be %r, got %r' % (example, group, type_of_target(example)))) for example in NON_ARRAY_LIKE_EXAMPLES: msg_regex = 'Expected array-like \(array or non-string sequence\).*' assert_raises_regex(ValueError, msg_regex, type_of_target, example) for example in MULTILABEL_SEQUENCES: msg = ('You appear to be using a legacy multi-label data ' 'representation. Sequence of sequences are no longer supported;' ' use a binary array or sparse matrix instead.') assert_raises_regex(ValueError, msg, type_of_target, example) def test_class_distribution(): y = np.array([[1, 0, 0, 1], [2, 2, 0, 1], [1, 3, 0, 1], [4, 2, 0, 1], [2, 0, 0, 1], [1, 3, 0, 1]]) # Define the sparse matrix with a mix of implicit and explicit zeros data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1]) indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5]) indptr = np.array([0, 6, 11, 11, 17]) y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4)) classes, n_classes, class_prior = class_distribution(y) classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp) classes_expected = [[1, 2, 4], [0, 2, 3], [0], [1]] n_classes_expected = [3, 3, 1, 1] class_prior_expected = [[3/6, 2/6, 1/6], [1/3, 1/3, 1/3], [1.0], [1.0]] for k in range(y.shape[1]): assert_array_almost_equal(classes[k], classes_expected[k]) assert_array_almost_equal(n_classes[k], n_classes_expected[k]) assert_array_almost_equal(class_prior[k], class_prior_expected[k]) assert_array_almost_equal(classes_sp[k], classes_expected[k]) assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k]) assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k]) # Test again with explicit sample weights (classes, n_classes, class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0]) (classes_sp, n_classes_sp, class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0]) class_prior_expected = [[4/9, 3/9, 2/9], [2/9, 4/9, 3/9], [1.0], [1.0]] for k in range(y.shape[1]): assert_array_almost_equal(classes[k], classes_expected[k]) assert_array_almost_equal(n_classes[k], n_classes_expected[k]) assert_array_almost_equal(class_prior[k], class_prior_expected[k]) assert_array_almost_equal(classes_sp[k], classes_expected[k]) 
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k]) assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k]) def test_safe_split_with_precomputed_kernel(): clf = SVC() clfp = SVC(kernel="precomputed") iris = datasets.load_iris() X, y = iris.data, iris.target K = np.dot(X, X.T) cv = ShuffleSplit(test_size=0.25, random_state=0) train, test = list(cv.split(X))[0] X_train, y_train = _safe_split(clf, X, y, train) K_train, y_train2 = _safe_split(clfp, K, y, train) assert_array_almost_equal(K_train, np.dot(X_train, X_train.T)) assert_array_almost_equal(y_train, y_train2) X_test, y_test = _safe_split(clf, X, y, test, train) K_test, y_test2 = _safe_split(clfp, K, y, test, train) assert_array_almost_equal(K_test, np.dot(X_test, X_train.T)) assert_array_almost_equal(y_test, y_test2)
bsd-3-clause
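A minimal sketch of how the two helpers exercised by the tests above behave on well-formed targets (assuming a scikit-learn version that ships sklearn.utils.multiclass; expected outputs shown as comments):

import numpy as np
from sklearn.utils.multiclass import type_of_target, unique_labels

# Target-type inference mirrors the EXAMPLES groups used in the tests.
print(type_of_target([0, 1, 1, 0]))                # 'binary'
print(type_of_target([1, 5, 2]))                   # 'multiclass'
print(type_of_target(np.array([[0, 1], [1, 1]])))  # 'multilabel-indicator'
print(type_of_target([0.1, 0.6]))                  # 'continuous'

# unique_labels merges label sets from several arrays of a consistent type.
print(unique_labels([3, 5, 1], [1, 2]))            # [1 2 3 5]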
etkirsch/scikit-learn
sklearn/datasets/species_distributions.py
198
7923
""" ============================= Species distribution dataset ============================= This dataset represents the geographic distribution of species. The dataset is provided by Phillips et. al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, Peru, and Venezuela. References: * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes: * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset """ # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # # License: BSD 3 clause from io import BytesIO from os import makedirs from os.path import join from os.path import exists try: # Python 2 from urllib2 import urlopen PY2 = True except ImportError: # Python 3 from urllib.request import urlopen PY2 = False import numpy as np from sklearn.datasets.base import get_data_home, Bunch from sklearn.externals import joblib DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/" SAMPLES_URL = join(DIRECTORY_URL, "samples.zip") COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip") DATA_ARCHIVE_NAME = "species_coverage.pkz" def _load_coverage(F, header_length=6, dtype=np.int16): """Load a coverage file from an open file object. This will return a numpy array of the given dtype """ header = [F.readline() for i in range(header_length)] make_tuple = lambda t: (t.split()[0], float(t.split()[1])) header = dict([make_tuple(line) for line in header]) M = np.loadtxt(F, dtype=dtype) nodata = header[b'NODATA_value'] if nodata != -9999: print(nodata) M[nodata] = -9999 return M def _load_csv(F): """Load csv file. Parameters ---------- F : file object CSV file open in byte mode. Returns ------- rec : np.ndarray record array representing the data """ if PY2: # Numpy recarray wants Python 2 str but not unicode names = F.readline().strip().split(',') else: # Numpy recarray wants Python 3 str but not bytes... names = F.readline().decode('ascii').strip().split(',') rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4') rec.dtype.names = names return rec def construct_grids(batch): """Construct the map grid from the batch object Parameters ---------- batch : Batch object The object returned by :func:`fetch_species_distributions` Returns ------- (xgrid, ygrid) : 1-D arrays The grid corresponding to the values in batch.coverages """ # x,y coordinates for corner cells xmin = batch.x_left_lower_corner + batch.grid_size xmax = xmin + (batch.Nx * batch.grid_size) ymin = batch.y_left_lower_corner + batch.grid_size ymax = ymin + (batch.Ny * batch.grid_size) # x coordinates of the grid cells xgrid = np.arange(xmin, xmax, batch.grid_size) # y coordinates of the grid cells ygrid = np.arange(ymin, ymax, batch.grid_size) return (xgrid, ygrid) def fetch_species_distributions(data_home=None, download_if_missing=True): """Loader for species distribution dataset from Phillips et. al. (2006) Read more in the :ref:`User Guide <datasets>`. Parameters ---------- data_home : optional, default: None Specify another download and cache folder for the datasets. 
By default all scikit-learn data is stored in '~/scikit_learn_data' subfolders. download_if_missing: optional, True by default If False, raise an IOError if the data is not locally available instead of trying to download the data from the source site. Returns -------- The data is returned as a Bunch object with the following attributes: coverages : array, shape = [14, 1592, 1212] These represent the 14 features measured at each point of the map grid. The latitude/longitude values for the grid are discussed below. Missing data is represented by the value -9999. train : record array, shape = (1623,) The training points for the data. Each point has three fields: - train['species'] is the species name - train['dd long'] is the longitude, in degrees - train['dd lat'] is the latitude, in degrees test : record array, shape = (619,) The test points for the data. Same format as the training data. Nx, Ny : integers The number of longitudes (x) and latitudes (y) in the grid x_left_lower_corner, y_left_lower_corner : floats The (x,y) position of the lower-left corner, in degrees grid_size : float The spacing between points of the grid, in degrees Notes ------ This dataset represents the geographic distribution of species. The dataset is provided by Phillips et al. (2006). The two species are: - `"Bradypus variegatus" <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ , the Brown-throated Sloth. - `"Microryzomys minutus" <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ , also known as the Forest Small Rice Rat, a rodent that lives in Peru, Colombia, Ecuador, and Venezuela. References ---------- * `"Maximum entropy modeling of species geographic distributions" <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_ S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling, 190:231-259, 2006. Notes ----- * See examples/applications/plot_species_distribution_modeling.py for an example of using this dataset with scikit-learn """ data_home = get_data_home(data_home) if not exists(data_home): makedirs(data_home) # Define parameters for the data files. These should not be changed # unless the data model changes. They will be saved in the npz file # with the downloaded data. extra_params = dict(x_left_lower_corner=-94.8, Nx=1212, y_left_lower_corner=-56.05, Ny=1592, grid_size=0.05) dtype = np.int16 if not exists(join(data_home, DATA_ARCHIVE_NAME)): print('Downloading species data from %s to %s' % (SAMPLES_URL, data_home)) X = np.load(BytesIO(urlopen(SAMPLES_URL).read())) for f in X.files: fhandle = BytesIO(X[f]) if 'train' in f: train = _load_csv(fhandle) if 'test' in f: test = _load_csv(fhandle) print('Downloading coverage data from %s to %s' % (COVERAGES_URL, data_home)) X = np.load(BytesIO(urlopen(COVERAGES_URL).read())) coverages = [] for f in X.files: fhandle = BytesIO(X[f]) print(' - converting', f) coverages.append(_load_coverage(fhandle)) coverages = np.asarray(coverages, dtype=dtype) bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params) joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9) else: bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME)) return bunch
bsd-3-clause
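A small sketch of the grid arithmetic in construct_grids above, using a hypothetical 4x3 Bunch so no download is needed (field values are illustrative, not the real dataset's):

from sklearn.datasets.base import Bunch
from sklearn.datasets.species_distributions import construct_grids

# Hypothetical tiny grid standing in for the real coverage maps.
batch = Bunch(x_left_lower_corner=-94.8, y_left_lower_corner=-56.05,
              Nx=4, Ny=3, grid_size=0.05)
xgrid, ygrid = construct_grids(batch)
print(xgrid)  # 4 longitudes starting at -94.75, spaced 0.05 degrees apart
print(ygrid)  # 3 latitudes starting at -56.0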
ominux/scikit-learn
examples/cluster/plot_adjusted_for_chance_measures.py
1
4105
""" ========================================================== Adjustment for chance in clustering performance evaluation ========================================================== The following plots demonstrate the impact of the number of clusters and number of samples on various clustering performance evaluation metrics. Non-adjusted measures such as the V-Measure show a dependency between the number of clusters and the number of samples: the mean V-Measure of random labeling increases signicantly as the number of clusters is closer to the total number of samples used to compute the measure. Adjusted for chance measure such as ARI display some random variations centered around a mean score of 0.0 for any number of samples and clusters. Only adjusted measures can hence safely be used as a consensus index to evaluate the average stability of clustering algorithms for a given value of k on various overlapping sub-samples of the dataset. """ print __doc__ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import numpy as np import pylab as pl from sklearn import metrics def uniform_labelings_scores(score_func, n_samples, n_clusters_range, fixed_n_classes=None, n_runs=10, seed=42): """Compute score for 2 random uniform cluster labelings. Both random labelings have the same number of clusters for each value possible value in ``n_clusters_range``. When fixed_n_classes is not None the first labeling is considered a ground truth class assignement with fixed number of classes. """ random_labels = np.random.RandomState(seed).random_integers scores = np.zeros((len(n_clusters_range), n_runs)) if fixed_n_classes is not None: labels_a = random_labels(low=0, high=fixed_n_classes - 1, size=n_samples) for i, k in enumerate(n_clusters_range): for j in range(n_runs): if fixed_n_classes is None: labels_a = random_labels(low=0, high=k - 1, size=n_samples) labels_b = random_labels(low=0, high=k - 1, size=n_samples) scores[i, j] = score_func(labels_a, labels_b) return scores score_funcs = [ metrics.adjusted_rand_score, metrics.v_measure_score, ] # 2 independent random clusterings with equal cluster number n_samples = 100 n_clusters_range = np.linspace(2, n_samples, 10).astype(np.int) pl.figure(1) plots = [] names = [] for score_func in score_funcs: print "Computing %s for %d values of n_clusters and n_samples=%d" % ( score_func.__name__, len(n_clusters_range), n_samples) scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range) plots.append(pl.errorbar( n_clusters_range, scores.mean(axis=1), scores.std(axis=1))) names.append(score_func.__name__) pl.title("Clustering measures for 2 random uniform labelings\n" "with equal number of clusters") pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples) pl.ylabel('Score value') pl.legend(plots, names) pl.ylim(ymin=-0.05, ymax=1.05) pl.show() # Random labeling with varying n_clusters against ground class labels # with fixed number of clusters n_samples = 1000 n_clusters_range = np.linspace(2, 100, 10).astype(np.int) n_classes = 10 pl.figure(2) plots = [] names = [] for score_func in score_funcs: print "Computing %s for %d values of n_clusters and n_samples=%d" % ( score_func.__name__, len(n_clusters_range), n_samples) scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range, fixed_n_classes=n_classes) plots.append(pl.errorbar( n_clusters_range, scores.mean(axis=1), scores.std(axis=1))) names.append(score_func.__name__) pl.title("Clustering measures for random uniform labeling\n" 
"against reference assignement with %d classes" % n_classes) pl.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples) pl.ylabel('Score value') pl.ylim(ymin=-0.05, ymax=1.05) pl.legend(plots, names) pl.show()
bsd-3-clause
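The core effect the plots above illustrate can be seen without any plotting; a minimal sketch comparing one adjusted and one non-adjusted measure on two independent random labelings:

import numpy as np
from sklearn import metrics

rng = np.random.RandomState(0)
a = rng.randint(0, 10, size=1000)  # random "ground truth", 10 clusters
b = rng.randint(0, 10, size=1000)  # independent random labeling

# The adjusted measure hovers around 0 for chance agreement;
# V-measure is small but systematically positive.
print(metrics.adjusted_rand_score(a, b))  # close to 0.0
print(metrics.v_measure_score(a, b))      # small positive bias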
helloworldajou/webserver
demos/classifier_webcam.py
4
7059
#!/usr/bin/env python2 # # Example to run classifier on webcam stream. # Brandon Amos & Vijayenthiran # 2016/06/21 # # Copyright 2015-2016 Carnegie Mellon University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Contrib: Vijayenthiran # This example file shows how to run a classifier on a webcam stream. You need to # run classifier.py to generate a classifier with your own dataset. # To run this file from the openface home dir: # ./demos/classifier_webcam.py <path-to-your-classifier> import time start = time.time() import argparse import cv2 import os import pickle import sys import numpy as np np.set_printoptions(precision=2) from sklearn.mixture import GMM import openface fileDir = os.path.dirname(os.path.realpath(__file__)) modelDir = os.path.join(fileDir, '..', 'models') dlibModelDir = os.path.join(modelDir, 'dlib') openfaceModelDir = os.path.join(modelDir, 'openface') def getRep(bgrImg): start = time.time() if bgrImg is None: raise Exception("Unable to load image/frame") rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB) if args.verbose: print(" + Original size: {}".format(rgbImg.shape)) if args.verbose: print("Loading the image took {} seconds.".format(time.time() - start)) start = time.time() # Get the largest face bounding box # bb = align.getLargestFaceBoundingBox(rgbImg) #Bounding box # Get all bounding boxes bb = align.getAllFaceBoundingBoxes(rgbImg) if bb is None: # raise Exception("Unable to find a face: {}".format(imgPath)) return None if args.verbose: print("Face detection took {} seconds.".format(time.time() - start)) start = time.time() alignedFaces = [] for box in bb: alignedFaces.append( align.align( args.imgDim, rgbImg, box, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)) if alignedFaces is None: raise Exception("Unable to align the frame") if args.verbose: print("Alignment took {} seconds.".format(time.time() - start)) start = time.time() reps = [] for alignedFace in alignedFaces: reps.append(net.forward(alignedFace)) if args.verbose: print("Neural network forward pass took {} seconds.".format( time.time() - start)) # print (reps) return reps def infer(img, args): with open(args.classifierModel, 'r') as f: if sys.version_info[0] < 3: (le, clf) = pickle.load(f) # le - label and clf - classifier else: (le, clf) = pickle.load(f, encoding='latin1') # le - label and clf - classifier reps = getRep(img) persons = [] confidences = [] for rep in reps: try: rep = rep.reshape(1, -1) except: print ("No Face detected") return (None, None) start = time.time() predictions = clf.predict_proba(rep).ravel() # print (predictions) maxI = np.argmax(predictions) # max2 = np.argsort(predictions)[-3:][::-1][1] persons.append(le.inverse_transform(maxI)) # print (str(le.inverse_transform(max2)) + ": "+str( predictions [max2])) # ^ prints the second prediction confidences.append(predictions[maxI]) if args.verbose: print("Prediction took {} seconds.".format(time.time() - start)) pass # print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence)) if isinstance(clf, GMM): dist =
np.linalg.norm(rep - clf.means_[maxI]) print(" + Distance from the mean: {}".format(dist)) pass return (persons, confidences) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--dlibFacePredictor', type=str, help="Path to dlib's face predictor.", default=os.path.join( dlibModelDir, "shape_predictor_68_face_landmarks.dat")) parser.add_argument( '--networkModel', type=str, help="Path to Torch network model.", default=os.path.join( openfaceModelDir, 'nn4.small2.v1.t7')) parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96) parser.add_argument( '--captureDevice', type=int, default=0, help='Capture device. 0 for laptop webcam and 1 for usb webcam') parser.add_argument('--width', type=int, default=320) parser.add_argument('--height', type=int, default=240) parser.add_argument('--threshold', type=float, default=0.5) parser.add_argument('--cuda', action='store_true') parser.add_argument('--verbose', action='store_true') parser.add_argument( 'classifierModel', type=str, help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.') args = parser.parse_args() align = openface.AlignDlib(args.dlibFacePredictor) net = openface.TorchNeuralNet( args.networkModel, imgDim=args.imgDim, cuda=args.cuda) # Capture device. Usually 0 will be webcam and 1 will be usb cam. video_capture = cv2.VideoCapture(args.captureDevice) video_capture.set(3, args.width) video_capture.set(4, args.height) confidenceList = [] while True: ret, frame = video_capture.read() persons, confidences = infer(frame, args) print ("P: " + str(persons) + " C: " + str(confidences)) try: # append with two floating point precision confidenceList.append('%.2f' % confidences[0]) except: # If there is no face detected, confidences matrix will be empty. # We can simply ignore it. pass for i, c in enumerate(confidences): if c <= args.threshold: # 0.5 is kept as threshold for known face. persons[i] = "_unknown" # Print the person name and conf value on the frame cv2.putText(frame, "P: {} C: {}".format(persons, confidences), (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1) cv2.imshow('', frame) # quit the program on the press of key 'q' if cv2.waitKey(1) & 0xFF == ord('q'): break # When everything is done, release the capture video_capture.release() cv2.destroyAllWindows()
apache-2.0
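The confidence-threshold step above can be sketched independently of openface; label_faces and the class names here are hypothetical stand-ins, using only numpy:

import numpy as np

def label_faces(probabilities, names, threshold=0.5):
    # For each detected face, keep the argmax class only if its probability
    # clears the threshold; otherwise mark it "_unknown" (as the demo does).
    persons, confidences = [], []
    for probs in probabilities:
        i = int(np.argmax(probs))
        persons.append(names[i] if probs[i] > threshold else "_unknown")
        confidences.append(float(probs[i]))
    return persons, confidences

probs = np.array([[0.7, 0.2, 0.1], [0.4, 0.35, 0.25]])
print(label_faces(probs, ["alice", "bob", "carol"]))
# (['alice', '_unknown'], [0.7, 0.4])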
roxyboy/scikit-learn
sklearn/mixture/tests/test_dpgmm.py
261
4490
import unittest import sys import numpy as np from sklearn.mixture import DPGMM, VBGMM from sklearn.mixture.dpgmm import log_normalize from sklearn.datasets import make_blobs from sklearn.utils.testing import assert_array_less, assert_equal from sklearn.mixture.tests.test_gmm import GMMTester from sklearn.externals.six.moves import cStringIO as StringIO np.seterr(all='warn') def test_class_weights(): # check that the class weights are updated # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50) dpgmm.fit(X) # get indices of components that are used: indices = np.unique(dpgmm.predict(X)) active = np.zeros(10, dtype=np.bool) active[indices] = True # used components are important assert_array_less(.1, dpgmm.weights_[active]) # others are not assert_array_less(dpgmm.weights_[~active], .05) def test_verbose_boolean(): # checks that the output for the verbose output is the same # for the flag values '1' and 'True' # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm_bool = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=True) dpgmm_int = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: # generate output with the boolean flag dpgmm_bool.fit(X) verbose_output = sys.stdout verbose_output.seek(0) bool_output = verbose_output.readline() # generate output with the int flag dpgmm_int.fit(X) verbose_output = sys.stdout verbose_output.seek(0) int_output = verbose_output.readline() assert_equal(bool_output, int_output) finally: sys.stdout = old_stdout def test_verbose_first_level(): # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=1) old_stdout = sys.stdout sys.stdout = StringIO() try: dpgmm.fit(X) finally: sys.stdout = old_stdout def test_verbose_second_level(): # simple 3 cluster dataset X, y = make_blobs(random_state=1) for Model in [DPGMM, VBGMM]: dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50, verbose=2) old_stdout = sys.stdout sys.stdout = StringIO() try: dpgmm.fit(X) finally: sys.stdout = old_stdout def test_log_normalize(): v = np.array([0.1, 0.8, 0.01, 0.09]) a = np.log(2 * v) assert np.allclose(v, log_normalize(a), rtol=0.01) def do_model(self, **kwds): return VBGMM(verbose=False, **kwds) class DPGMMTester(GMMTester): model = DPGMM do_test_eval = False def score(self, g, train_obs): _, z = g.score_samples(train_obs) return g.lower_bound(train_obs, z) class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester): covariance_type = 'spherical' setUp = GMMTester._setUp class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester): covariance_type = 'diag' setUp = GMMTester._setUp class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester): covariance_type = 'tied' setUp = GMMTester._setUp class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester): covariance_type = 'full' setUp = GMMTester._setUp class VBGMMTester(GMMTester): model = do_model do_test_eval = False def score(self, g, train_obs): _, z = g.score_samples(train_obs) return g.lower_bound(train_obs, z) class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester): covariance_type = 'spherical' setUp = GMMTester._setUp class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester): covariance_type = 'diag' setUp = GMMTester._setUp class 
TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester): covariance_type = 'tied' setUp = GMMTester._setUp class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester): covariance_type = 'full' setUp = GMMTester._setUp
bsd-3-clause
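A minimal sketch mirroring test_class_weights above: with far more components than true clusters, the Dirichlet process prior should drive most mixture weights toward zero (DPGMM was deprecated and later removed, so this assumes an old scikit-learn):

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.mixture import DPGMM

X, _ = make_blobs(random_state=1)  # 3 well-separated clusters
model = DPGMM(n_components=10, alpha=20, n_iter=50, random_state=1).fit(X)
used = np.unique(model.predict(X))
print(used)                     # typically only a few of the 10 components
print(model.weights_.round(2))  # active components carry most of the mass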
igabriel85/dmon-adp
misc/keras_test.py
1
1530
import numpy import pandas as pd from keras.models import Sequential from keras.layers import Dense from keras.wrappers.scikit_learn import KerasClassifier from keras.utils import np_utils from sklearn.model_selection import cross_val_score from sklearn.model_selection import KFold from sklearn.preprocessing import LabelEncoder from sklearn.pipeline import Pipeline import os, sys # fix random seed for reproducibility seed = 7 numpy.random.seed(seed) dataDir = os.path.join(os.path.dirname(os.path.abspath('')), 'data') # load dataset dataframe = pd.read_csv(os.path.join(dataDir, 'iris.csv')) dataset = dataframe.values X = dataset[:,0:4].astype(float) Y = dataset[:,4] # print Y # encode class values as integers encoder = LabelEncoder() encoder.fit(Y) encoded_Y = encoder.transform(Y) # convert integers to dummy variables (i.e. one hot encoded) dummy_y = np_utils.to_categorical(encoded_Y) # print dummy_y # define baseline model def baseline_model(): # create model model = Sequential() model.add(Dense(8, input_dim=4, activation='relu')) model.add(Dense(3, activation='softmax')) # Compile model model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) return model estimator = KerasClassifier(build_fn=baseline_model, epochs=200, batch_size=20, verbose=1) kfold = KFold(n_splits=10, shuffle=True, random_state=seed) results = cross_val_score(estimator, X, dummy_y, cv=kfold) print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
apache-2.0
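The label pipeline in the script above (string classes to integers to one-hot) in isolation; a small sketch with made-up class names, assuming the same old keras API:

import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils

Y = np.array(['setosa', 'versicolor', 'setosa', 'virginica'])
encoder = LabelEncoder().fit(Y)
encoded_Y = encoder.transform(Y)              # e.g. [0 1 0 2]
dummy_y = np_utils.to_categorical(encoded_Y)  # one row of 0/1 per sample
print(dummy_y.shape)                          # (4, 3)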
RachitKansal/scikit-learn
examples/feature_selection/plot_rfe_with_cross_validation.py
226
1384
""" =================================================== Recursive feature elimination with cross-validation =================================================== A recursive feature elimination example with automatic tuning of the number of features selected with cross-validation. """ print(__doc__) import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.cross_validation import StratifiedKFold from sklearn.feature_selection import RFECV from sklearn.datasets import make_classification # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=25, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, random_state=0) # Create the RFE object and compute a cross-validated score. svc = SVC(kernel="linear") # The "accuracy" scoring is proportional to the number of correct # classifications rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2), scoring='accuracy') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show()
bsd-3-clause
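Continuing from the fitted rfecv in the script above, the selected-feature mask and ranking can be inspected directly (support_, ranking_ and transform are standard RFECV attributes):

# support_ is a boolean mask over the 25 features; ranking_ assigns 1 to
# every selected feature and larger values to those eliminated earlier.
print(rfecv.support_.sum())     # equals rfecv.n_features_
print(rfecv.ranking_)
X_reduced = rfecv.transform(X)  # keep only the selected columns
print(X_reduced.shape)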
tawsifkhan/scikit-learn
sklearn/linear_model/omp.py
127
30417
"""Orthogonal matching pursuit algorithms """ # Author: Vlad Niculae # # License: BSD 3 clause import warnings from distutils.version import LooseVersion import numpy as np from scipy import linalg from scipy.linalg.lapack import get_lapack_funcs from .base import LinearModel, _pre_fit from ..base import RegressorMixin from ..utils import as_float_array, check_array, check_X_y from ..cross_validation import check_cv from ..externals.joblib import Parallel, delayed import scipy solve_triangular_args = {} if LooseVersion(scipy.__version__) >= LooseVersion('0.12'): # check_finite=False is an optimization available only in scipy >=0.12 solve_triangular_args = {'check_finite': False} premature = """ Orthogonal matching pursuit ended prematurely due to linear dependence in the dictionary. The requested precision might not have been met. """ def _cholesky_omp(X, y, n_nonzero_coefs, tol=None, copy_X=True, return_path=False): """Orthogonal Matching Pursuit step using the Cholesky decomposition. Parameters ---------- X : array, shape (n_samples, n_features) Input dictionary. Columns are assumed to have unit norm. y : array, shape (n_samples,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coef : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence. """ if copy_X: X = X.copy('F') else: # even if we are allowed to overwrite, still copy it if bad order X = np.asfortranarray(X) min_float = np.finfo(X.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (X,)) potrs, = get_lapack_funcs(('potrs',), (X,)) alpha = np.dot(X.T, y) residual = y gamma = np.empty(0) n_active = 0 indices = np.arange(X.shape[1]) # keeping track of swapping max_features = X.shape[1] if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=X.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=X.dtype) L[0, 0] = 1. 
if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(np.dot(X.T, residual))) if lam < n_active or alpha[lam] ** 2 < min_float: # atom already selected or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=2) break if n_active > 0: # Updates the Cholesky decomposition of X' X L[n_active, :n_active] = np.dot(X[:, :n_active].T, X[:, lam]) linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=2) break L[n_active, n_active] = np.sqrt(1 - v) X.T[n_active], X.T[lam] = swap(X.T[n_active], X.T[lam]) alpha[n_active], alpha[lam] = alpha[lam], alpha[n_active] indices[n_active], indices[lam] = indices[lam], indices[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], alpha[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma residual = y - np.dot(X[:, :n_active], gamma) if tol is not None and nrm2(residual) ** 2 <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def _gram_omp(Gram, Xy, n_nonzero_coefs, tol_0=None, tol=None, copy_Gram=True, copy_Xy=True, return_path=False): """Orthogonal Matching Pursuit step on a precomputed Gram matrix. This function uses the Cholesky decomposition method. Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data matrix Xy : array, shape (n_features,) Input targets n_nonzero_coefs : int Targeted number of non-zero elements tol_0 : float Squared norm of y, required if tol is not None. tol : float Targeted squared error, if not None overrides n_nonzero_coefs. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. Returns ------- gamma : array, shape (n_nonzero_coefs,) Non-zero elements of the solution idx : array, shape (n_nonzero_coefs,) Indices of the positions of the elements in gamma within the solution vector coefs : array, shape (n_features, n_nonzero_coefs) The first k values of column k correspond to the coefficient value for the active features at that step. The lower left triangle contains garbage. Only returned if ``return_path=True``. n_active : int Number of active features at convergence.
""" Gram = Gram.copy('F') if copy_Gram else np.asfortranarray(Gram) if copy_Xy: Xy = Xy.copy() min_float = np.finfo(Gram.dtype).eps nrm2, swap = linalg.get_blas_funcs(('nrm2', 'swap'), (Gram,)) potrs, = get_lapack_funcs(('potrs',), (Gram,)) indices = np.arange(len(Gram)) # keeping track of swapping alpha = Xy tol_curr = tol_0 delta = 0 gamma = np.empty(0) n_active = 0 max_features = len(Gram) if tol is not None else n_nonzero_coefs if solve_triangular_args: # new scipy, don't need to initialize because check_finite=False L = np.empty((max_features, max_features), dtype=Gram.dtype) else: # old scipy, we need the garbage upper triangle to be non-Inf L = np.zeros((max_features, max_features), dtype=Gram.dtype) L[0, 0] = 1. if return_path: coefs = np.empty_like(L) while True: lam = np.argmax(np.abs(alpha)) if lam < n_active or alpha[lam] ** 2 < min_float: # selected same atom twice, or inner product too small warnings.warn(premature, RuntimeWarning, stacklevel=3) break if n_active > 0: L[n_active, :n_active] = Gram[lam, :n_active] linalg.solve_triangular(L[:n_active, :n_active], L[n_active, :n_active], trans=0, lower=1, overwrite_b=True, **solve_triangular_args) v = nrm2(L[n_active, :n_active]) ** 2 if 1 - v <= min_float: # selected atoms are dependent warnings.warn(premature, RuntimeWarning, stacklevel=3) break L[n_active, n_active] = np.sqrt(1 - v) Gram[n_active], Gram[lam] = swap(Gram[n_active], Gram[lam]) Gram.T[n_active], Gram.T[lam] = swap(Gram.T[n_active], Gram.T[lam]) indices[n_active], indices[lam] = indices[lam], indices[n_active] Xy[n_active], Xy[lam] = Xy[lam], Xy[n_active] n_active += 1 # solves LL'x = y as a composition of two triangular systems gamma, _ = potrs(L[:n_active, :n_active], Xy[:n_active], lower=True, overwrite_b=False) if return_path: coefs[:n_active, n_active - 1] = gamma beta = np.dot(Gram[:, :n_active], gamma) alpha = Xy - beta if tol is not None: tol_curr += delta delta = np.inner(gamma, beta[:n_active]) tol_curr -= delta if abs(tol_curr) <= tol: break elif n_active == max_features: break if return_path: return gamma, indices[:n_active], coefs[:, :n_active], n_active else: return gamma, indices[:n_active], n_active def orthogonal_mp(X, y, n_nonzero_coefs=None, tol=None, precompute=False, copy_X=True, return_path=False, return_n_iter=False): """Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems. An instance of the problem has the form: When parametrized by the number of non-zero coefficients using `n_nonzero_coefs`: argmin ||y - X\gamma||^2 subject to ||\gamma||_0 <= n_{nonzero coefs} When parametrized by error using the parameter `tol`: argmin ||\gamma||_0 subject to ||y - X\gamma||^2 <= tol Read more in the :ref:`User Guide <omp>`. Parameters ---------- X : array, shape (n_samples, n_features) Input data. Columns are assumed to have unit norm. y : array, shape (n_samples,) or (n_samples, n_targets) Input targets n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. precompute : {True, False, 'auto'}, Whether to perform precomputations. Improves performance when n_targets or n_samples is very large. copy_X : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. return_path : bool, optional. 
Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp_gram lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ X = check_array(X, order='F', copy=copy_X) copy_X = False if y.ndim == 1: y = y.reshape(-1, 1) y = check_array(y) if y.shape[1] > 1: # subsequent targets will be affected copy_X = True if n_nonzero_coefs is None and tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. n_nonzero_coefs = max(int(0.1 * X.shape[1]), 1) if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > X.shape[1]: raise ValueError("The number of atoms cannot be more than the number " "of features") if precompute == 'auto': precompute = X.shape[0] > X.shape[1] if precompute: G = np.dot(X.T, X) G = np.asfortranarray(G) Xy = np.dot(X.T, y) if tol is not None: norms_squared = np.sum((y ** 2), axis=0) else: norms_squared = None return orthogonal_mp_gram(G, Xy, n_nonzero_coefs, tol, norms_squared, copy_Gram=copy_X, copy_Xy=False, return_path=return_path) if return_path: coef = np.zeros((X.shape[1], y.shape[1], X.shape[1])) else: coef = np.zeros((X.shape[1], y.shape[1])) n_iters = [] for k in range(y.shape[1]): out = _cholesky_omp( X, y[:, k], n_nonzero_coefs, tol, copy_X=copy_X, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if y.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) def orthogonal_mp_gram(Gram, Xy, n_nonzero_coefs=None, tol=None, norms_squared=None, copy_Gram=True, copy_Xy=True, return_path=False, return_n_iter=False): """Gram Orthogonal Matching Pursuit (OMP) Solves n_targets Orthogonal Matching Pursuit problems using only the Gram matrix X.T * X and the product X.T * y. Read more in the :ref:`User Guide <omp>`. 
Parameters ---------- Gram : array, shape (n_features, n_features) Gram matrix of the input data: X.T * X Xy : array, shape (n_features,) or (n_features, n_targets) Input targets multiplied by X: X.T * y n_nonzero_coefs : int Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float Maximum norm of the residual. If not None, overrides n_nonzero_coefs. norms_squared : array-like, shape (n_targets,) Squared L2 norms of the lines of y. Required if tol is not None. copy_Gram : bool, optional Whether the gram matrix must be copied by the algorithm. A false value is only helpful if it is already Fortran-ordered, otherwise a copy is made anyway. copy_Xy : bool, optional Whether the covariance vector Xy must be copied by the algorithm. If False, it may be overwritten. return_path : bool, optional. Default: False Whether to return every value of the nonzero coefficients along the forward path. Useful for cross-validation. return_n_iter : bool, optional default False Whether or not to return the number of iterations. Returns ------- coef : array, shape (n_features,) or (n_features, n_targets) Coefficients of the OMP solution. If `return_path=True`, this contains the whole coefficient path. In this case its shape is (n_features, n_features) or (n_features, n_targets, n_features) and iterating over the last axis yields coefficients in increasing order of active features. n_iters : array-like or int Number of active features across every target. Returned only if `return_n_iter` is set to True. See also -------- OrthogonalMatchingPursuit orthogonal_mp lars_path decomposition.sparse_encode Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf """ Gram = check_array(Gram, order='F', copy=copy_Gram) Xy = np.asarray(Xy) if Xy.ndim > 1 and Xy.shape[1] > 1: # or subsequent target will be affected copy_Gram = True if Xy.ndim == 1: Xy = Xy[:, np.newaxis] if tol is not None: norms_squared = [norms_squared] if n_nonzero_coefs is None and tol is None: n_nonzero_coefs = int(0.1 * len(Gram)) if tol is not None and norms_squared is None: raise ValueError('Gram OMP needs the precomputed norms in order ' 'to evaluate the error sum of squares.') if tol is not None and tol < 0: raise ValueError("Epsilon cannot be negative") if tol is None and n_nonzero_coefs <= 0: raise ValueError("The number of atoms must be positive") if tol is None and n_nonzero_coefs > len(Gram): raise ValueError("The number of atoms cannot be more than the number " "of features") if return_path: coef = np.zeros((len(Gram), Xy.shape[1], len(Gram))) else: coef = np.zeros((len(Gram), Xy.shape[1])) n_iters = [] for k in range(Xy.shape[1]): out = _gram_omp( Gram, Xy[:, k], n_nonzero_coefs, norms_squared[k] if tol is not None else None, tol, copy_Gram=copy_Gram, copy_Xy=copy_Xy, return_path=return_path) if return_path: _, idx, coefs, n_iter = out coef = coef[:, :, :len(idx)] for n_active, x in enumerate(coefs.T): coef[idx[:n_active + 1], k, n_active] = x[:n_active + 1] else: x, idx, n_iter = out coef[idx, k] = x n_iters.append(n_iter) if Xy.shape[1] == 1: n_iters = n_iters[0] if return_n_iter: return np.squeeze(coef), n_iters else: return np.squeeze(coef) class OrthogonalMatchingPursuit(LinearModel, RegressorMixin): """Orthogonal Matching Pursuit model (OMP) Parameters ---------- n_nonzero_coefs : int, optional Desired number of non-zero entries in the solution. If None (by default) this value is set to 10% of n_features. tol : float, optional Maximum norm of the residual. If not None, overrides n_nonzero_coefs. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. precompute : {True, False, 'auto'}, default 'auto' Whether to use a precomputed Gram and Xy matrix to speed up calculations. Improves performance when `n_targets` or `n_samples` is very large. Note that if you already have such matrices, you can pass them directly to the fit method. Read more in the :ref:`User Guide <omp>`. Attributes ---------- coef_ : array, shape (n_features,) or (n_features, n_targets) parameter vector (w in the formula) intercept_ : float or array, shape (n_targets,) independent term in decision function. n_iter_ : int or array-like Number of active features across every target. Notes ----- Orthogonal matching pursuit was introduced in G. Mallat, Z. Zhang, Matching pursuits with time-frequency dictionaries, IEEE Transactions on Signal Processing, Vol. 41, No. 12. (December 1993), pp. 3397-3415. (http://blanche.polytechnique.fr/~mallat/papiers/MallatPursuit93.pdf) This implementation is based on Rubinstein, R., Zibulevsky, M. and Elad, M., Efficient Implementation of the K-SVD Algorithm using Batch Orthogonal Matching Pursuit Technical Report - CS Technion, April 2008. 
http://www.cs.technion.ac.il/~ronrubin/Publications/KSVD-OMP-v2.pdf See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars decomposition.sparse_encode """ def __init__(self, n_nonzero_coefs=None, tol=None, fit_intercept=True, normalize=True, precompute='auto'): self.n_nonzero_coefs = n_nonzero_coefs self.tol = tol self.fit_intercept = fit_intercept self.normalize = normalize self.precompute = precompute def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape (n_samples, n_features) Training data. y : array-like, shape (n_samples,) or (n_samples, n_targets) Target values. Returns ------- self : object returns an instance of self. """ X, y = check_X_y(X, y, multi_output=True, y_numeric=True) n_features = X.shape[1] X, y, X_mean, y_mean, X_std, Gram, Xy = \ _pre_fit(X, y, None, self.precompute, self.normalize, self.fit_intercept, copy=True) if y.ndim == 1: y = y[:, np.newaxis] if self.n_nonzero_coefs is None and self.tol is None: # default for n_nonzero_coefs is 0.1 * n_features # but at least one. self.n_nonzero_coefs_ = max(int(0.1 * n_features), 1) else: self.n_nonzero_coefs_ = self.n_nonzero_coefs if Gram is False: coef_, self.n_iter_ = orthogonal_mp( X, y, self.n_nonzero_coefs_, self.tol, precompute=False, copy_X=True, return_n_iter=True) else: norms_sq = np.sum(y ** 2, axis=0) if self.tol is not None else None coef_, self.n_iter_ = orthogonal_mp_gram( Gram, Xy=Xy, n_nonzero_coefs=self.n_nonzero_coefs_, tol=self.tol, norms_squared=norms_sq, copy_Gram=True, copy_Xy=True, return_n_iter=True) self.coef_ = coef_.T self._set_intercept(X_mean, y_mean, X_std) return self def _omp_path_residues(X_train, y_train, X_test, y_test, copy=True, fit_intercept=True, normalize=True, max_iter=100): """Compute the residues on left-out data for a full LARS path Parameters ----------- X_train : array, shape (n_samples, n_features) The data to fit the LARS on y_train : array, shape (n_samples) The target variable to fit LARS on X_test : array, shape (n_samples, n_features) The data to compute the residues on y_test : array, shape (n_samples) The target variable to compute the residues on copy : boolean, optional Whether X_train, X_test, y_train and y_test should be copied. If False, they may be overwritten. fit_intercept : boolean whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 100 by default. 
Returns ------- residues: array, shape (n_samples, max_features) Residues of the prediction on the test data """ if copy: X_train = X_train.copy() y_train = y_train.copy() X_test = X_test.copy() y_test = y_test.copy() if fit_intercept: X_mean = X_train.mean(axis=0) X_train -= X_mean X_test -= X_mean y_mean = y_train.mean(axis=0) y_train = as_float_array(y_train, copy=False) y_train -= y_mean y_test = as_float_array(y_test, copy=False) y_test -= y_mean if normalize: norms = np.sqrt(np.sum(X_train ** 2, axis=0)) nonzeros = np.flatnonzero(norms) X_train[:, nonzeros] /= norms[nonzeros] coefs = orthogonal_mp(X_train, y_train, n_nonzero_coefs=max_iter, tol=None, precompute=False, copy_X=False, return_path=True) if coefs.ndim == 1: coefs = coefs[:, np.newaxis] if normalize: coefs[nonzeros] /= norms[nonzeros][:, np.newaxis] return np.dot(coefs.T, X_test.T) - y_test class OrthogonalMatchingPursuitCV(LinearModel, RegressorMixin): """Cross-validated Orthogonal Matching Pursuit model (OMP) Parameters ---------- copy : bool, optional Whether the design matrix X must be copied by the algorithm. A false value is only helpful if X is already Fortran-ordered, otherwise a copy is made anyway. fit_intercept : boolean, optional whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional If False, the regressors X are assumed to be already normalized. max_iter : integer, optional Maximum numbers of iterations to perform, therefore maximum features to include. 10% of ``n_features`` but at least 5 if available. cv : cross-validation generator, optional see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to a 5-fold strategy n_jobs : integer, optional Number of CPUs to use during the cross validation. If ``-1``, use all the CPUs verbose : boolean or integer, optional Sets the verbosity amount Read more in the :ref:`User Guide <omp>`. Attributes ---------- intercept_ : float or array, shape (n_targets,) Independent term in decision function. coef_ : array, shape (n_features,) or (n_features, n_targets) Parameter vector (w in the problem formulation). n_nonzero_coefs_ : int Estimated number of non-zero coefficients giving the best mean squared error over the cross-validation folds. n_iter_ : int or array-like Number of active features across every target for the model refit with the best hyperparameters got by cross-validating across all folds. See also -------- orthogonal_mp orthogonal_mp_gram lars_path Lars LassoLars OrthogonalMatchingPursuit LarsCV LassoLarsCV decomposition.sparse_encode """ def __init__(self, copy=True, fit_intercept=True, normalize=True, max_iter=None, cv=None, n_jobs=1, verbose=False): self.copy = copy self.fit_intercept = fit_intercept self.normalize = normalize self.max_iter = max_iter self.cv = cv self.n_jobs = n_jobs self.verbose = verbose def fit(self, X, y): """Fit the model using X, y as training data. Parameters ---------- X : array-like, shape [n_samples, n_features] Training data. y : array-like, shape [n_samples] Target values. Returns ------- self : object returns an instance of self. 
""" X, y = check_X_y(X, y, y_numeric=True) X = as_float_array(X, copy=False, force_all_finite=False) cv = check_cv(self.cv, X, y, classifier=False) max_iter = (min(max(int(0.1 * X.shape[1]), 5), X.shape[1]) if not self.max_iter else self.max_iter) cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( delayed(_omp_path_residues)( X[train], y[train], X[test], y[test], self.copy, self.fit_intercept, self.normalize, max_iter) for train, test in cv) min_early_stop = min(fold.shape[0] for fold in cv_paths) mse_folds = np.array([(fold[:min_early_stop] ** 2).mean(axis=1) for fold in cv_paths]) best_n_nonzero_coefs = np.argmin(mse_folds.mean(axis=0)) + 1 self.n_nonzero_coefs_ = best_n_nonzero_coefs omp = OrthogonalMatchingPursuit(n_nonzero_coefs=best_n_nonzero_coefs, fit_intercept=self.fit_intercept, normalize=self.normalize) omp.fit(X, y) self.coef_ = omp.coef_ self.intercept_ = omp.intercept_ self.n_iter_ = omp.n_iter_ return self
bsd-3-clause
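A short usage sketch for the estimator defined above: with noiseless data generated from a 3-sparse coefficient vector, OMP should recover the true support (synthetic data, outcome hedged with "expect"):

import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit

rng = np.random.RandomState(0)
X = rng.randn(100, 30)
w = np.zeros(30)
w[[2, 11, 23]] = [1.5, -2.0, 0.7]  # 3-sparse ground truth
y = X.dot(w)

omp = OrthogonalMatchingPursuit(n_nonzero_coefs=3).fit(X, y)
print(np.flatnonzero(omp.coef_))   # expect [ 2 11 23]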
lpeska/BRDTI
netlaprls.py
1
2811
''' We base the NetLapRLS implementation on the one from PyDTI project, https://github.com/stephenliu0423/PyDTI, changes were made to the evaluation procedure [1] Xia, Zheng, et al. "Semi-supervised drug-protein interaction prediction from heterogeneous biological spaces." BMC systems biology 4.Suppl 2 (2010): S6. Default parameters: gamma_d = 0.01, gamma_d=gamma_d2/gamma_d1 gamma_t = 0.01, gamma_t=gamma_p2/gamma_p1 beta_d = 0.3 beta_t = 0.3 ''' import numpy as np from sklearn.metrics import precision_recall_curve, roc_curve from sklearn.metrics import auc from functions import normalized_discounted_cummulative_gain class NetLapRLS: def __init__(self, gamma_d=0.01, gamma_t=0.01, beta_d=0.3, beta_t=0.3): self.gamma_d = float(gamma_d) self.gamma_t = float(gamma_t) self.beta_d = float(beta_d) self.beta_t = float(beta_t) def fix_model(self, W, intMat, drugMat, targetMat, seed=None): R = W*intMat m, n = R.shape drugMat = (drugMat+drugMat.T)/2 targetMat = (targetMat+targetMat.T)/2 Wd = (drugMat+self.gamma_d*np.dot(R, R.T))/(1.0+self.gamma_d) Wt = (targetMat+self.gamma_t*np.dot(R.T, R))/(1.0+self.gamma_t) Wd = Wd-np.diag(np.diag(Wd)) Wt = Wt-np.diag(np.diag(Wt)) D = np.diag(np.sqrt(1.0/np.sum(Wd, axis=1))) Ld = np.eye(m) - np.dot(np.dot(D, Wd), D) D = np.diag(np.sqrt(1.0/np.sum(Wt, axis=1))) Lt = np.eye(n) - np.dot(np.dot(D, Wt), D) X = np.linalg.inv(Wd+self.beta_d*np.dot(Ld, Wd)) Fd = np.dot(np.dot(Wd, X), R) X = np.linalg.inv(Wt+self.beta_t*np.dot(Lt, Wt)) Ft = np.dot(np.dot(Wt, X), R.T) self.predictR = 0.5*(Fd+Ft.T) def predict_scores(self, test_data, N): inx = np.array(test_data) return self.predictR[inx[:, 0], inx[:, 1]] def evaluation(self, test_data, test_label): scores = self.predictR[test_data[:, 0], test_data[:, 1]] self.scores = scores x, y = test_data[:, 0], test_data[:, 1] test_data_T = np.column_stack((y,x)) ndcg = normalized_discounted_cummulative_gain(test_data, test_label, np.array(scores)) ndcg_inv = normalized_discounted_cummulative_gain(test_data_T, test_label, np.array(scores)) prec, rec, thr = precision_recall_curve(test_label, scores) aupr_val = auc(rec, prec) fpr, tpr, thr = roc_curve(test_label, scores) auc_val = auc(fpr, tpr) #!!!!we should distinguish here between inverted and not inverted methods nDCGs!!!! return aupr_val, auc_val, ndcg, ndcg_inv def __str__(self): return "Model: NetLapRLS, gamma_d:%s, gamma_t:%s, beta_d:%s, beta_t:%s" % (self.gamma_d, self.gamma_t, self.beta_d, self.beta_t)
gpl-2.0
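The normalized-Laplacian step inside fix_model above, isolated as a tiny numpy sketch: given a symmetric similarity matrix W with zeroed diagonal, it builds L = I - D^{-1/2} W D^{-1/2} exactly as the code does:

import numpy as np

W = np.array([[0., 1., 0.5],
              [1., 0., 0.2],
              [0.5, 0.2, 0.]])
D = np.diag(np.sqrt(1.0 / W.sum(axis=1)))  # D^{-1/2} as a diagonal matrix
L = np.eye(3) - D.dot(W).dot(D)            # normalized graph Laplacian
print(L.round(3))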
poryfly/scikit-learn
sklearn/kernel_ridge.py
155
6545
"""Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression.""" # Authors: Mathieu Blondel <mathieu@mblondel.org> # Jan Hendrik Metzen <jhm@informatik.uni-bremen.de> # License: BSD 3 clause import numpy as np from .base import BaseEstimator, RegressorMixin from .metrics.pairwise import pairwise_kernels from .linear_model.ridge import _solve_cholesky_kernel from .utils import check_X_y from .utils.validation import check_is_fitted class KernelRidge(BaseEstimator, RegressorMixin): """Kernel ridge regression. Kernel ridge regression (KRR) combines ridge regression (linear least squares with l2-norm regularization) with the kernel trick. It thus learns a linear function in the space induced by the respective kernel and the data. For non-linear kernels, this corresponds to a non-linear function in the original space. The form of the model learned by KRR is identical to support vector regression (SVR). However, different loss functions are used: KRR uses squared error loss while support vector regression uses epsilon-insensitive loss, both combined with l2 regularization. In contrast to SVR, fitting a KRR model can be done in closed-form and is typically faster for medium-sized datasets. On the other hand, the learned model is non-sparse and thus slower than SVR, which learns a sparse model for epsilon > 0, at prediction-time. This estimator has built-in support for multi-variate regression (i.e., when y is a 2d-array of shape [n_samples, n_targets]). Read more in the :ref:`User Guide <kernel_ridge>`. Parameters ---------- alpha : {float, array-like}, shape = [n_targets] Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``(2*C)^-1`` in other linear models such as LogisticRegression or LinearSVC. If an array is passed, penalties are assumed to be specific to the targets. Hence they must correspond in number. kernel : string or callable, default="linear" Kernel mapping used internally. A callable should accept two arguments and the keyword arguments passed to this object as kernel_params, and should return a floating point number. gamma : float, default=None Gamma parameter for the RBF, polynomial, exponential chi2 and sigmoid kernels. Interpretation of the default value is left to the kernel; see the documentation for sklearn.metrics.pairwise. Ignored by other kernels. degree : float, default=3 Degree of the polynomial kernel. Ignored by other kernels. coef0 : float, default=1 Zero coefficient for polynomial and sigmoid kernels. Ignored by other kernels. kernel_params : mapping of string to any, optional Additional parameters (keyword arguments) for kernel function passed as callable object. Attributes ---------- dual_coef_ : array, shape = [n_features] or [n_targets, n_features] Weight vector(s) in kernel space X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data, which is also required for prediction References ---------- * Kevin P. Murphy "Machine Learning: A Probabilistic Perspective", The MIT Press chapter 14.4.3, pp. 492-493 See also -------- Ridge Linear ridge regression. SVR Support Vector Regression implemented using libsvm. 
Examples -------- >>> from sklearn.kernel_ridge import KernelRidge >>> import numpy as np >>> n_samples, n_features = 10, 5 >>> rng = np.random.RandomState(0) >>> y = rng.randn(n_samples) >>> X = rng.randn(n_samples, n_features) >>> clf = KernelRidge(alpha=1.0) >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear', kernel_params=None) """ def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1, kernel_params=None): self.alpha = alpha self.kernel = kernel self.gamma = gamma self.degree = degree self.coef0 = coef0 self.kernel_params = kernel_params def _get_kernel(self, X, Y=None): if callable(self.kernel): params = self.kernel_params or {} else: params = {"gamma": self.gamma, "degree": self.degree, "coef0": self.coef0} return pairwise_kernels(X, Y, metric=self.kernel, filter_params=True, **params) @property def _pairwise(self): return self.kernel == "precomputed" def fit(self, X, y=None, sample_weight=None): """Fit Kernel Ridge regression model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or numpy array of shape [n_samples] Individual weights for each sample, ignored if None is passed. Returns ------- self : returns an instance of self. """ # Convert data X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True, y_numeric=True) K = self._get_kernel(X) alpha = np.atleast_1d(self.alpha) ravel = False if len(y.shape) == 1: y = y.reshape(-1, 1) ravel = True copy = self.kernel == "precomputed" self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha, sample_weight, copy) if ravel: self.dual_coef_ = self.dual_coef_.ravel() self.X_fit_ = X return self def predict(self, X): """Predict using the kernel ridge model Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] Samples. Returns ------- C : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. """ check_is_fitted(self, ["X_fit_", "dual_coef_"]) K = self._get_kernel(X, self.X_fit_) return np.dot(K, self.dual_coef_)
bsd-3-clause
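A minimal NumPy sketch of the closed form that `KernelRidge.fit` delegates to `_solve_cholesky_kernel`: the dual coefficients solve (K + alpha * I) c = y and predictions are K(X_new, X) . c, which is why `dual_coef_` carries one weight per training sample. A plain `np.linalg.solve` stands in for the Cholesky-based solver, and `rbf_kernel` is a hand-rolled helper assumed for illustration:

import numpy as np

def rbf_kernel(A, B, gamma=0.5):
    # squared Euclidean distances via the usual expansion, then exponentiate
    d2 = (np.sum(A ** 2, axis=1)[:, None]
          + np.sum(B ** 2, axis=1)[None, :] - 2.0 * A.dot(B.T))
    return np.exp(-gamma * d2)

rng = np.random.RandomState(0)
X, y = rng.randn(20, 3), rng.randn(20)
alpha = 1.0

K = rbf_kernel(X, X)
dual_coef = np.linalg.solve(K + alpha * np.eye(len(X)), y)  # "fit"

X_new = rng.randn(5, 3)
y_pred = rbf_kernel(X_new, X).dot(dual_coef)                # "predict"
print(y_pred.shape)  # (5,)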
abhishekgahlot/scikit-learn
examples/applications/topics_extraction_with_nmf.py
106
2313
""" ======================================================== Topics extraction with Non-Negative Matrix Factorization ======================================================== This is a proof of concept application of Non Negative Matrix Factorization of the term frequency matrix of a corpus of documents so as to extract an additive model of the topic structure of the corpus. The output is a list of topics, each represented as a list of terms (weights are not shown). The default parameters (n_samples / n_features / n_topics) should make the example runnable in a couple of tens of seconds. You can try to increase the dimensions of the problem, but be aware than the time complexity is polynomial. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function from time import time from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.decomposition import NMF from sklearn.datasets import fetch_20newsgroups n_samples = 2000 n_features = 1000 n_topics = 10 n_top_words = 20 # Load the 20 newsgroups dataset and vectorize it. We use a few heuristics # to filter out useless terms early on: the posts are stripped of headers, # footers and quoted replies, and common English words, words occurring in # only one document or in at least 95% of the documents are removed. t0 = time() print("Loading dataset and extracting TF-IDF features...") dataset = fetch_20newsgroups(shuffle=True, random_state=1, remove=('headers', 'footers', 'quotes')) vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features, stop_words='english') tfidf = vectorizer.fit_transform(dataset.data[:n_samples]) print("done in %0.3fs." % (time() - t0)) # Fit the NMF model print("Fitting the NMF model with n_samples=%d and n_features=%d..." % (n_samples, n_features)) nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf) print("done in %0.3fs." % (time() - t0)) feature_names = vectorizer.get_feature_names() for topic_idx, topic in enumerate(nmf.components_): print("Topic #%d:" % topic_idx) print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])) print()
bsd-3-clause
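The topic printing loop above relies on a compact argsort idiom: `topic.argsort()[:-n_top_words - 1:-1]` yields the indices of the `n_top_words` largest weights in decreasing order. A small self-contained check of that slicing, on toy data not drawn from the example:

import numpy as np

feature_names = np.array(['apple', 'bike', 'car', 'dog', 'egg'])
topic = np.array([0.1, 3.0, 0.5, 2.0, 0.0])
n_top_words = 3

# argsort is ascending; the reversed slice keeps the last (largest) entries
top = topic.argsort()[:-n_top_words - 1:-1]
print(top)                                       # [1 3 2]
print(" ".join(feature_names[i] for i in top))   # bike dog car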
RachitKansal/scikit-learn
examples/mixture/plot_gmm_classifier.py
250
3918
""" ================== GMM classification ================== Demonstration of Gaussian mixture models for classification. See :ref:`gmm` for more information on the estimator. Plots predicted labels on both training and held out test data using a variety of GMM classifiers on the iris dataset. Compares GMMs with spherical, diagonal, full, and tied covariance matrices in increasing order of performance. Although one would expect full covariance to perform best in general, it is prone to overfitting on small datasets and does not generalize well to held out test data. On the plots, train data is shown as dots, while test data is shown as crosses. The iris dataset is four-dimensional. Only the first two dimensions are shown here, and thus some points are separated in other dimensions. """ print(__doc__) # Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux # License: BSD 3 clause # $Id$ import matplotlib.pyplot as plt import matplotlib as mpl import numpy as np from sklearn import datasets from sklearn.cross_validation import StratifiedKFold from sklearn.externals.six.moves import xrange from sklearn.mixture import GMM def make_ellipses(gmm, ax): for n, color in enumerate('rgb'): v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2]) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # convert to degrees v *= 9 ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1], 180 + angle, color=color) ell.set_clip_box(ax.bbox) ell.set_alpha(0.5) ax.add_artist(ell) iris = datasets.load_iris() # Break up the dataset into non-overlapping training (75%) and testing # (25%) sets. skf = StratifiedKFold(iris.target, n_folds=4) # Only take the first fold. train_index, test_index = next(iter(skf)) X_train = iris.data[train_index] y_train = iris.target[train_index] X_test = iris.data[test_index] y_test = iris.target[test_index] n_classes = len(np.unique(y_train)) # Try GMMs using different types of covariances. classifiers = dict((covar_type, GMM(n_components=n_classes, covariance_type=covar_type, init_params='wc', n_iter=20)) for covar_type in ['spherical', 'diag', 'tied', 'full']) n_classifiers = len(classifiers) plt.figure(figsize=(3 * n_classifiers / 2, 6)) plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05, left=.01, right=.99) for index, (name, classifier) in enumerate(classifiers.items()): # Since we have class labels for the training data, we can # initialize the GMM parameters in a supervised manner. classifier.means_ = np.array([X_train[y_train == i].mean(axis=0) for i in xrange(n_classes)]) # Train the other parameters using the EM algorithm. classifier.fit(X_train) h = plt.subplot(2, n_classifiers / 2, index + 1) make_ellipses(classifier, h) for n, color in enumerate('rgb'): data = iris.data[iris.target == n] plt.scatter(data[:, 0], data[:, 1], 0.8, color=color, label=iris.target_names[n]) # Plot the test data with crosses for n, color in enumerate('rgb'): data = X_test[y_test == n] plt.plot(data[:, 0], data[:, 1], 'x', color=color) y_train_pred = classifier.predict(X_train) train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100 plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy, transform=h.transAxes) y_test_pred = classifier.predict(X_test) test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100 plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy, transform=h.transAxes) plt.xticks(()) plt.yticks(()) plt.title(name) plt.legend(loc='lower right', prop=dict(size=12)) plt.show()
bsd-3-clause
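`make_ellipses` above turns each 2x2 covariance block into an ellipse: the eigendecomposition supplies the orientation and axis lengths. A standalone sketch of that geometry, with a made-up sample covariance; the scale factor 9 and the `w[0]` row indexing deliberately mirror the example rather than claim to be canonical:

import numpy as np

cov = np.array([[2.0, 0.8],
                [0.8, 1.0]])
v, w = np.linalg.eigh(cov)        # eigenvalues ascending, eigenvectors in w
u = w[0] / np.linalg.norm(w[0])   # first row, matching the example's indexing
angle = np.degrees(np.arctan2(u[1], u[0]))
width, height = 9 * v             # axis lengths after the example's scaling
print(angle, width, height)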
neale/CS-program
434-MachineLearning/final_project/linearClassifier/sklearn/__init__.py
27
3086
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.18.dev0' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'exceptions', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'model_selection', 'multiclass', 'multioutput', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', 'discriminant_analysis', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
unlicense
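The `setup_module` fixture above amounts to "seed from the environment when SKLEARN_SEED is set, otherwise draw and report a seed". The same logic as a standalone snippet:

import os
import random
import numpy as np

seed = os.environ.get('SKLEARN_SEED', None)
if seed is None:
    seed = np.random.uniform() * (2 ** 31 - 1)
seed = int(seed)
print("Seeding RNGs with %r" % seed)  # reported so a failing run can be replayed
np.random.seed(seed)
random.seed(seed)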
etkirsch/scikit-learn
examples/model_selection/grid_search_text_feature_extraction.py
253
4158
""" ========================================================== Sample pipeline for text feature extraction and evaluation ========================================================== The dataset used in this example is the 20 newsgroups dataset which will be automatically downloaded and then cached and reused for the document classification example. You can adjust the number of categories by giving their names to the dataset loader or setting them to None to get the 20 of them. Here is a sample output of a run on a quad-core machine:: Loading 20 newsgroups dataset for categories: ['alt.atheism', 'talk.religion.misc'] 1427 documents 2 categories Performing grid search... pipeline: ['vect', 'tfidf', 'clf'] parameters: {'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07), 'clf__n_iter': (10, 50, 80), 'clf__penalty': ('l2', 'elasticnet'), 'tfidf__use_idf': (True, False), 'vect__max_n': (1, 2), 'vect__max_df': (0.5, 0.75, 1.0), 'vect__max_features': (None, 5000, 10000, 50000)} done in 1737.030s Best score: 0.940 Best parameters set: clf__alpha: 9.9999999999999995e-07 clf__n_iter: 50 clf__penalty: 'elasticnet' tfidf__use_idf: True vect__max_n: 2 vect__max_df: 0.75 vect__max_features: 50000 """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # Peter Prettenhofer <peter.prettenhofer@gmail.com> # Mathieu Blondel <mathieu@mblondel.org> # License: BSD 3 clause from __future__ import print_function from pprint import pprint from time import time import logging from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.linear_model import SGDClassifier from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline print(__doc__) # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') ############################################################################### # Load some categories from the training set categories = [ 'alt.atheism', 'talk.religion.misc', ] # Uncomment the following to do the analysis on all the categories #categories = None print("Loading 20 newsgroups dataset for categories:") print(categories) data = fetch_20newsgroups(subset='train', categories=categories) print("%d documents" % len(data.filenames)) print("%d categories" % len(data.target_names)) print() ############################################################################### # define a pipeline combining a text feature extractor with a simple # classifier pipeline = Pipeline([ ('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', SGDClassifier()), ]) # uncommenting more parameters will give better exploring power but will # increase processing time in a combinatorial way parameters = { 'vect__max_df': (0.5, 0.75, 1.0), #'vect__max_features': (None, 5000, 10000, 50000), 'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams #'tfidf__use_idf': (True, False), #'tfidf__norm': ('l1', 'l2'), 'clf__alpha': (0.00001, 0.000001), 'clf__penalty': ('l2', 'elasticnet'), #'clf__n_iter': (10, 50, 80), } if __name__ == "__main__": # multiprocessing requires the fork to happen in a __main__ protected # block # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1) print("Performing grid search...") print("pipeline:", [name for name, _ in pipeline.steps]) print("parameters:") pprint(parameters) t0 = time() 
grid_search.fit(data.data, data.target) print("done in %0.3fs" % (time() - t0)) print() print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name]))
bsd-3-clause
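The comment about combinatorial growth in the grid above is literal: the number of candidate settings is the product of the per-parameter value counts, which is what GridSearchCV enumerates (times the number of CV folds). A dependency-free sketch of that expansion, using a pared-down three-key grid as a stand-in:

from itertools import product

parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
}
keys = sorted(parameters)
candidates = [dict(zip(keys, values))
              for values in product(*(parameters[k] for k in keys))]
print(len(candidates))   # 3 * 2 * 2 = 12 settings to cross-validate
print(candidates[0])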
sunyihuan326/DeltaLab
shuwei_fengge/practice_one/model/tt.py
1
3958
# coding:utf-8 ''' Created on 2017/12/8. @author: chk01 ''' import scipy.io as scio # data = scio.loadmat(file) # from sklearn.model_selection import train_test_split # # print(data['X'].shape) # print(data['Y'].shape) # X_train, X_test, Y_train, Y_test = train_test_split(data['X'], data['Y'], test_size=0.2) # print(X_train.shape) # print(Y_train.shape) # print(X_test.shape) # print(Y_test.shape) import numpy as np import scipy.io as scio import tensorflow as tf from practice_one.model.utils import * from tensorflow.contrib.factorization import KMeans from sklearn.ensemble import AdaBoostClassifier # print(np.e) # print(-np.log(np.e / (np.e + 8))) # ZL = tf.Variable([[0, 1, 0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32) # print(ZL.shape) # Y = tf.constant([[0, 0, 0, 0, 0, 0, 1, 0, 0]], dtype=tf.float32) # Y = tf.get_variable(dtype=tf.float32, shape=(1, 2), name='tt',initializer=tf.contrib.layers.xavier_initializer()) # cor_op = tf.argmax(Y, 1) # pre_op = tf.argmax(ZL, 1) # cost1 = tf.square(tf.cast(cor_op - pre_op, dtype=tf.float32)) # lost = tf.reduce_mean( # cost1 + tf.nn.softmax_cross_entropy_with_logits(logits=ZL, # labels=Y)) # # loss = tf.reduce_sum(tf.where(tf.greater(y, y_), (y - y_) * loss_more, (y_ - y) * loss_less)) # train_op = tf.train.GradientDescentOptimizer(0.1).minimize(lost) # init = tf.global_variables_initializer() # with tf.Session() as sess: # sess.run(init) # for i in range(30): # sess.run(train_op) # print(sess.run(lost)) # print(sess.run(tf.reduce_mean(cost1))) # print(sess.run(tf.argmax(ZL, 1))) # 1.37195 # 2.37195 # parameters = scio.loadmat('kmeans_parameters.mat') # X_train, X_test, Y_train, Y_test = load_data("face_1_channel_sense.mat") # print(X_test.shape) # num_features = 28 # num_classes = 3 # # X = tf.placeholder(tf.float32, shape=[None, num_features]) # Y = tf.placeholder(tf.float32, shape=[None, num_classes]) # # kmeans = KMeans(inputs=X, num_clusters=300, # distance_metric='cosine', # use_mini_batch=True) # # (all_scores, cluster_idx, scores, cluster_centers_initialized, cluster_centers_var, init_op, # train_op) = kmeans.training_graph() # cluster_idx = cluster_idx[0] # fix for cluster_idx being a tuple # # # Initialize the variables (i.e. assign their default value) # init_vars = tf.global_variables_initializer() # # # Start TensorFlow session # sess = tf.Session() # sess.run(init_vars, feed_dict={X: X_test}) # sess.run(init_op, feed_dict={X: X_test}) # cl = sess.run(cluster_idx, feed_dict={X: X_train}) # print("cl",cl) # print(len(cl)) # parameters = scio.loadmat('kmeans_parameters.mat') # print("parameters",parameters['labels_map'][0]) # labels_map = tf.convert_to_tensor(parameters['labels_map'][0]) # # # Evaluation ops # # Lookup: centroid_id -> label # cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx) # # # Test Model # test_x, test_y = X_test, Y_test # with sess.as_default(): # cluster_label = cluster_label.eval(feed_dict={X: X_test}) # # c = 0 # for i in range(len(cluster_label)): # if abs(cluster_label[i] - np.argmax(Y_train, 1)[i]) > 1: # c += 1. 
/ len(cluster_label) # print(c) # tt = scio.loadmat("tt_cluster_label.mat") # sense = scio.loadmat("sense_cluster.mat") # tt = tt["tt"][0] # se = sense["sense"][0] # for i in range(len(tt)): # if tt[i] != se[i]: # print(i, tt[i], se[i]) # # print('correct_prediction', correct_prediction) # index = [1, 2, 0, 2, 1, 2] # indice = [[0, 2, 1, 1, 1], [0, 1, 1, 2, 1]] # a = tf.one_hot(index, 3, axis=0) # b = tf.one_hot(indice, 3, axis=1) # with tf.Session() as sess: # print(sess.run(a)) # print("b", sess.run(b)) file = "face_1_channel_sense" X_train, X_test, Y_train, Y_test = load_data(file) clf = AdaBoostClassifier(n_estimators=100) Y_train = np.argmax(Y_train, 1) c = clf.fit(X_train, Y_train) print(c)
mit
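The live tail of the scratch file above converts one-hot targets to class indices with `np.argmax` before fitting AdaBoost. A self-contained version on random stand-in data; the shapes and class count are assumptions of this sketch, not taken from the record's .mat files:

import numpy as np
from sklearn.ensemble import AdaBoostClassifier

rng = np.random.RandomState(0)
X_train = rng.randn(60, 4)
Y_onehot = np.eye(3)[rng.randint(0, 3, 60)]   # fake one-hot labels, 3 classes

y_train = np.argmax(Y_onehot, 1)              # back to class indices 0..2
clf = AdaBoostClassifier(n_estimators=100)
clf.fit(X_train, y_train)
print(clf.predict(X_train[:5]))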
krahman/BuildingMachineLearningSystemsWithPython
ch04/build_lda.py
1
2472
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License from __future__ import print_function try: import nltk.corpus except ImportError: print("nltk not found") print("please install it") raise from scipy.spatial import distance import numpy as np import string from gensim import corpora, models, similarities import sklearn.datasets import nltk.stem from collections import defaultdict english_stemmer = nltk.stem.SnowballStemmer('english') stopwords = set(nltk.corpus.stopwords.words('english')) stopwords.update(['from:', 'subject:', 'writes:', 'writes']) class DirectText(corpora.textcorpus.TextCorpus): def get_texts(self): return self.input def __len__(self): return len(self.input) try: dataset = sklearn.datasets.load_mlcomp("20news-18828", "train", mlcomp_root='./data') except: print("Newsgroup data not found.") print("Please download from http://mlcomp.org/datasets/379") print("And expand the zip into the subdirectory data/") print() print() raise otexts = dataset.data texts = dataset.data texts = [t.decode('utf-8', 'ignore') for t in texts] texts = [t.split() for t in texts] texts = [map(lambda w: w.lower(), t) for t in texts] texts = [filter(lambda s: not len(set("+-.?!()>@012345689") & set(s)), t) for t in texts] texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t) for t in texts] texts = [map(english_stemmer.stem, t) for t in texts] usage = defaultdict(int) for t in texts: for w in set(t): usage[w] += 1 limit = len(texts) / 10 too_common = [w for w in usage if usage[w] > limit] too_common = set(too_common) texts = [filter(lambda s: s not in too_common, t) for t in texts] corpus = DirectText(texts) dictionary = corpus.dictionary try: dictionary['computer'] except: pass model = models.ldamodel.LdaModel( corpus, num_topics=100, id2word=dictionary.id2token) thetas = np.zeros((len(texts), 100)) for i, c in enumerate(corpus): for ti, v in model[c]: thetas[i, ti] += v distances = distance.squareform(distance.pdist(thetas)) large = distances.max() + 1 for i in xrange(len(distances)): distances[i, i] = large print(otexts[1]) print() print() print() print(otexts[distances[1].argmin()])
mit
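The ending of the script above finds each document's nearest neighbour in topic space by inflating the diagonal of a squared distance matrix so a document cannot match itself. The same idea in a few lines, on random stand-in thetas; `np.fill_diagonal` is an equivalent, more idiomatic form of the loop used above:

import numpy as np
from scipy.spatial import distance

thetas = np.random.RandomState(0).rand(6, 4)      # 6 "documents", 4 "topics"
distances = distance.squareform(distance.pdist(thetas))
np.fill_diagonal(distances, distances.max() + 1)  # exclude self-matches
print(distances[1].argmin())                      # closest document to doc 1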
olologin/scikit-learn
examples/linear_model/plot_sgd_iris.py
286
2202
""" ======================================== Plot multi-class SGD on the iris dataset ======================================== Plot decision surface of multi-class SGD on iris dataset. The hyperplanes corresponding to the three one-versus-all (OVA) classifiers are represented by the dashed lines. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.linear_model import SGDClassifier # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target colors = "bry" # shuffle idx = np.arange(X.shape[0]) np.random.seed(13) np.random.shuffle(idx) X = X[idx] y = y[idx] # standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std h = .02 # step size in the mesh clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('tight') # Plot also the training points for i, color in zip(clf.classes_, colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i], cmap=plt.cm.Paired) plt.title("Decision surface of multi-class SGD") plt.axis('tight') # Plot the three one-against-all classifiers xmin, xmax = plt.xlim() ymin, ymax = plt.ylim() coef = clf.coef_ intercept = clf.intercept_ def plot_hyperplane(c, color): def line(x0): return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1] plt.plot([xmin, xmax], [line(xmin), line(xmax)], ls="--", color=color) for i, color in zip(clf.classes_, colors): plot_hyperplane(i, color) plt.legend() plt.show()
bsd-3-clause
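`plot_hyperplane` above solves the OVA decision boundary w0*x0 + w1*x1 + b = 0 for x1 at the two x-limits. A standalone check with made-up coefficients (the `coef` and `intercept` values are illustrative, not fitted):

import numpy as np

coef = np.array([[1.5, -2.0]])   # one OVA classifier: w0, w1
intercept = np.array([0.5])      # b

def line(x0, c=0):
    return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]

xmin, xmax = -1.0, 1.0
print([line(xmin), line(xmax)])  # endpoints of the dashed boundary segment

# any point produced by line() satisfies the hyperplane equation
x0 = 0.3
assert abs(coef[0, 0] * x0 + coef[0, 1] * line(x0) + intercept[0]) < 1e-12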
ishanic/scikit-learn
sklearn/manifold/tests/test_t_sne.py
162
9771
import sys from sklearn.externals.six.moves import cStringIO as StringIO import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises_regexp from sklearn.utils import check_random_state from sklearn.manifold.t_sne import _joint_probabilities from sklearn.manifold.t_sne import _kl_divergence from sklearn.manifold.t_sne import _gradient_descent from sklearn.manifold.t_sne import trustworthiness from sklearn.manifold.t_sne import TSNE from sklearn.manifold._utils import _binary_search_perplexity from scipy.optimize import check_grad from scipy.spatial.distance import pdist from scipy.spatial.distance import squareform def test_gradient_descent_stops(): # Test stopping conditions of gradient descent. class ObjectiveSmallGradient: def __init__(self): self.it = -1 def __call__(self, _): self.it += 1 return (10 - self.it) / 10.0, np.array([1e-5]) def flat_function(_): return 0.0, np.ones(1) # Gradient norm old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100, n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 1.0) assert_equal(it, 0) assert("gradient norm" in out) # Error difference old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100, n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 0.9) assert_equal(it, 1) assert("error difference" in out) # Maximum number of iterations without improvement old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( flat_function, np.zeros(1), 0, n_iter=100, n_iter_without_progress=10, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 0.0) assert_equal(it, 11) assert("did not make any progress" in out) # Maximum number of iterations old_stdout = sys.stdout sys.stdout = StringIO() try: _, error, it = _gradient_descent( ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11, n_iter_without_progress=100, momentum=0.0, learning_rate=0.0, min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert_equal(error, 0.0) assert_equal(it, 10) assert("Iteration 10" in out) def test_binary_search(): # Test if the binary search finds Gaussians with desired perplexity. random_state = check_random_state(0) distances = random_state.randn(50, 2) distances = distances.dot(distances.T) np.fill_diagonal(distances, 0.0) desired_perplexity = 25.0 P = _binary_search_perplexity(distances, desired_perplexity, verbose=0) P = np.maximum(P, np.finfo(np.double).eps) mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i]))) for i in range(P.shape[0])]) assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3) def test_gradient(): # Test gradient of Kullback-Leibler divergence. 
random_state = check_random_state(0) n_samples = 50 n_features = 2 n_components = 2 alpha = 1.0 distances = random_state.randn(n_samples, n_features) distances = distances.dot(distances.T) np.fill_diagonal(distances, 0.0) X_embedded = random_state.randn(n_samples, n_components) P = _joint_probabilities(distances, desired_perplexity=25.0, verbose=0) fun = lambda params: _kl_divergence(params, P, alpha, n_samples, n_components)[0] grad = lambda params: _kl_divergence(params, P, alpha, n_samples, n_components)[1] assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0, decimal=5) def test_trustworthiness(): # Test trustworthiness score. random_state = check_random_state(0) # Affine transformation X = random_state.randn(100, 2) assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0) # Randomly shuffled X = np.arange(100).reshape(-1, 1) X_embedded = X.copy() random_state.shuffle(X_embedded) assert_less(trustworthiness(X, X_embedded), 0.6) # Completely different X = np.arange(5).reshape(-1, 1) X_embedded = np.array([[0], [2], [4], [1], [3]]) assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2) def test_preserve_trustworthiness_approximately(): # Nearest neighbors should be preserved approximately. random_state = check_random_state(0) X = random_state.randn(100, 2) for init in ('random', 'pca'): tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, init=init, random_state=0) X_embedded = tsne.fit_transform(X) assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0, decimal=1) def test_fit_csr_matrix(): # X can be a sparse matrix. random_state = check_random_state(0) X = random_state.randn(100, 2) X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0 X_csr = sp.csr_matrix(X) tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, random_state=0) X_embedded = tsne.fit_transform(X_csr) assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0, decimal=1) def test_preserve_trustworthiness_approximately_with_precomputed_distances(): # Nearest neighbors should be preserved approximately. random_state = check_random_state(0) X = random_state.randn(100, 2) D = squareform(pdist(X), "sqeuclidean") tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0, metric="precomputed", random_state=0) X_embedded = tsne.fit_transform(D) assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1, precomputed=True), 1.0, decimal=1) def test_early_exaggeration_too_small(): # Early exaggeration factor must be >= 1. tsne = TSNE(early_exaggeration=0.99) assert_raises_regexp(ValueError, "early_exaggeration .*", tsne.fit_transform, np.array([[0.0]])) def test_too_few_iterations(): # Number of gradient descent iterations must be at least 200. tsne = TSNE(n_iter=199) assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform, np.array([[0.0]])) def test_non_square_precomputed_distances(): # Precomputed distance matrices must be square matrices. tsne = TSNE(metric="precomputed") assert_raises_regexp(ValueError, ".* square distance matrix", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_init_not_available(): # 'init' must be 'pca' or 'random'. assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'", TSNE, init="not available") def test_distance_not_available(): # 'metric' must be valid. 
tsne = TSNE(metric="not available") assert_raises_regexp(ValueError, "Unknown metric not available.*", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_pca_initialization_not_compatible_with_precomputed_kernel(): # Precomputed distance matrices must be square matrices. tsne = TSNE(metric="precomputed", init="pca") assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be " "used with metric=\"precomputed\".", tsne.fit_transform, np.array([[0.0], [1.0]])) def test_verbose(): random_state = check_random_state(0) tsne = TSNE(verbose=2) X = random_state.randn(5, 2) old_stdout = sys.stdout sys.stdout = StringIO() try: tsne.fit_transform(X) finally: out = sys.stdout.getvalue() sys.stdout.close() sys.stdout = old_stdout assert("[t-SNE]" in out) assert("Computing pairwise distances" in out) assert("Computed conditional probabilities" in out) assert("Mean sigma" in out) assert("Finished" in out) assert("early exaggeration" in out) assert("Finished" in out) def test_chebyshev_metric(): # t-SNE should allow metrics that cannot be squared (issue #3526). random_state = check_random_state(0) tsne = TSNE(metric="chebyshev") X = random_state.randn(5, 2) tsne.fit_transform(X) def test_reduction_to_one_component(): # t-SNE should allow reduction to one component (issue #4154). random_state = check_random_state(0) tsne = TSNE(n_components=1) X = random_state.randn(5, 2) X_embedded = tsne.fit(X).embedding_ assert(np.all(np.isfinite(X_embedded)))
bsd-3-clause
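`test_gradient` above leans on `scipy.optimize.check_grad`, which compares an analytic gradient against finite differences. A tiny usage example with a function whose gradient is known exactly:

import numpy as np
from scipy.optimize import check_grad

fun = lambda x: np.sum(x ** 2)   # f(x) = ||x||^2
grad = lambda x: 2 * x           # exact gradient

x0 = np.random.RandomState(0).randn(5)
err = check_grad(fun, grad, x0)  # norm of (finite differences - grad)
print(err)
assert err < 1e-5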
hmendozap/auto-sklearn
autosklearn/metalearning/metafeatures/plot_metafeatures.py
1
20297
from __future__ import print_function import argparse import cPickle import itertools import os import StringIO import sys import matplotlib.pyplot as plt import numpy as np import pandas as pd from sklearn.decomposition import PCA try: from sklearn.manifold import TSNE from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS import sklearn.metrics.pairwise except: print("Failed to load TSNE, probably you're using sklearn 0.14.X") from pyMetaLearn.metalearning.meta_base import MetaBase import pyMetaLearn.metalearning.create_datasets import pyMetaLearn.data_repositories.openml.apiconnector def load_dataset(dataset, dataset_directory): dataset_dir = os.path.abspath(os.path.join(dataset_directory, dataset)) fh = open(os.path.join(dataset_dir, dataset + ".pkl")) ds = cPickle.load(fh) fh.close() data_frame = ds.convert_arff_structure_to_pandas(ds .get_unprocessed_files()) class_ = data_frame.keys()[-1] attributes = data_frame.keys()[0:-1] X = data_frame[attributes] Y = data_frame[class_] return X, Y def plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeatures_times, runs, method='pca', seed=1, depth=1, distance='l2'): """Project datasets in a 2d space and plot them. arguments: * metafeatures_plot_dir: a directory to save the generated plots * metafeatures: a pandas Dataframe from the MetaBase * runs: a dictionary of runs from the MetaBase * method: either pca or t-sne * seed: only used for t-sne * depth: if 1, a one-step look-ahead is performed """ if type(metafeatures) != pd.DataFrame: raise ValueError("Argument metafeatures must be of type pd.Dataframe " "but is %s" % str(type(metafeatures))) ############################################################################ # Write out the datasets and their size as a TEX table # TODO put this in an own function dataset_tex = StringIO.StringIO() dataset_tex.write('\\begin{tabular}{lrrr}\n') dataset_tex.write('\\textbf{Dataset name} & ' '\\textbf{\#features} & ' '\\textbf{\#patterns} & ' '\\textbf{\#classes} \\\\\n') num_features = [] num_instances = [] num_classes = [] for dataset in sorted(metafeatures.index): dataset_tex.write('%s & %d & %d & %d \\\\\n' % ( dataset.replace('larochelle_etal_2007_', '').replace( '_', '-'), metafeatures.loc[dataset]['number_of_features'], metafeatures.loc[dataset]['number_of_instances'], metafeatures.loc[dataset]['number_of_classes'])) num_features.append(metafeatures.loc[dataset]['number_of_features']) num_instances.append(metafeatures.loc[dataset]['number_of_instances']) num_classes.append(metafeatures.loc[dataset]['number_of_classes']) dataset_tex.write('Minimum & %.1f & %.1f & %.1f \\\\\n' % (np.min(num_features), np.min(num_instances), np.min(num_classes))) dataset_tex.write('Maximum & %.1f & %.1f & %.1f \\\\\n' % (np.max(num_features), np.max(num_instances), np.max(num_classes))) dataset_tex.write('Mean & %.1f & %.1f & %.1f \\\\\n' % (np.mean(num_features), np.mean(num_instances), np.mean(num_classes))) dataset_tex.write('10\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % ( np.percentile(num_features, 10), np.percentile(num_instances, 10), np.percentile(num_classes, 10))) dataset_tex.write('90\\%% quantile & %.1f & %.1f & %.1f \\\\\n' % ( np.percentile(num_features, 90), np.percentile(num_instances, 90), np.percentile(num_classes, 90))) dataset_tex.write('median & %.1f & %.1f & %.1f \\\\\n' % ( np.percentile(num_features, 50), np.percentile(num_instances, 50), np.percentile(num_classes, 50))) dataset_tex.write('\\end{tabular}') dataset_tex.seek(0) dataset_tex_output = 
os.path.join(metafeatures_plot_dir, 'datasets.tex') with open(dataset_tex_output, 'w') as fh: fh.write(dataset_tex.getvalue()) ############################################################################ # Write out a list of metafeatures, each with the min/max/mean # calculation time and the min/max/mean value metafeatures_tex = StringIO.StringIO() metafeatures_tex.write('\\begin{tabular}{lrrrrrr}\n') metafeatures_tex.write('\\textbf{Metafeature} & ' '\\textbf{Minimum} & ' '\\textbf{Mean} & ' '\\textbf{Maximum} &' '\\textbf{Minimum time} &' '\\textbf{Mean time} &' '\\textbf{Maximum time} ' '\\\\\n') for mf_name in sorted(metafeatures.columns): metafeatures_tex.write('%s & %.2f & %.2f & %.2f & %.2f & %.2f & %.2f \\\\\n' % (mf_name.replace('_', '-'), metafeatures.loc[:,mf_name].min(), metafeatures.loc[:,mf_name].mean(), metafeatures.loc[:,mf_name].max(), metafeature_times.loc[:, mf_name].min(), metafeature_times.loc[:, mf_name].mean(), metafeature_times.loc[:, mf_name].max())) metafeatures_tex.write('\\end{tabular}') metafeatures_tex.seek(0) metafeatures_tex_output = os.path.join(metafeatures_plot_dir, 'metafeatures.tex') with open(metafeatures_tex_output, 'w') as fh: fh.write(metafeatures_tex.getvalue()) # Without this scaling the transformation for visualization purposes is # useless metafeatures = metafeatures.copy() X_min = np.nanmin(metafeatures, axis=0) X_max = np.nanmax(metafeatures, axis=0) metafeatures = (metafeatures - X_min) / (X_max - X_min) # PCA if method == 'pca': pca = PCA(2) transformation = pca.fit_transform(metafeatures.values) elif method == 't-sne': if distance == 'l2': distance_matrix = sklearn.metrics.pairwise.pairwise_distances( metafeatures.values, metric='l2') elif distance == 'l1': distance_matrix = sklearn.metrics.pairwise.pairwise_distances( metafeatures.values, metric='l1') elif distance == 'runs': names_to_indices = dict() for metafeature in metafeatures.index: idx = len(names_to_indices) names_to_indices[metafeature] = idx X, Y = pyMetaLearn.metalearning.create_datasets\ .create_predict_spearman_rank(metafeatures, runs, 'combination') # Make a metric matrix out of Y distance_matrix = np.zeros((metafeatures.shape[0], metafeatures.shape[0]), dtype=np.float64) for idx in Y.index: dataset_names = idx.split("_") d1 = names_to_indices[dataset_names[0]] d2 = names_to_indices[dataset_names[1]] distance_matrix[d1][d2] = Y.loc[idx] distance_matrix[d2][d1] = Y.loc[idx] else: raise NotImplementedError() # For whatever reason, tsne doesn't accept l1 metric tsne = TSNE(random_state=seed, perplexity=50, verbose=1) transformation = tsne.fit_transform(distance_matrix) # Transform the transformation back to range [0, 1] to ease plotting transformation_min = np.nanmin(transformation, axis=0) transformation_max = np.nanmax(transformation, axis=0) transformation = (transformation - transformation_min) / \ (transformation_max - transformation_min) print(transformation_min, transformation_max) #for i, dataset in enumerate(directory_content): # print dataset, meta_feature_array[i] fig = plt.figure(dpi=600, figsize=(12, 12)) ax = plt.subplot(111) # The dataset names must be aligned at the borders of the plot in a way # the arrows don't cross each other. 
First, define the different slots # where the labels will be positioned and then figure out the optimal # order of the labels slots = [] # 25 datasets on the top y-axis slots.extend([(-0.1 + 0.05 * i, 1.1) for i in range(25)]) # 24 datasets on the right x-axis slots.extend([(1.1, 1.05 - 0.05 * i) for i in range(24)]) # 25 datasets on the bottom y-axis slots.extend([(-0.1 + 0.05 * i, -0.1) for i in range(25)]) # 24 datasets on the left x-axis slots.extend([(-0.1, 1.05 - 0.05 * i) for i in range(24)]) # Align the labels on the outer axis labels_top = [] labels_left = [] labels_right = [] labels_bottom = [] for values in zip(metafeatures.index, transformation[:, 0], transformation[:, 1]): label, x, y = values # Although all plot area goes up to 1.1, 1.1, the range of all the # points lies inside [0,1] if x >= y and x < 1.0 - y: labels_bottom.append((x, label)) elif x >= y and x >= 1.0 - y: labels_right.append((y, label)) elif y > x and x <= 1.0 -y: labels_left.append((y, label)) else: labels_top.append((x, label)) # Sort the labels according to their alignment labels_bottom.sort() labels_left.sort() labels_left.reverse() labels_right.sort() labels_right.reverse() labels_top.sort() # Build an index label -> x, y points = {} for values in zip(metafeatures.index, transformation[:, 0], transformation[:, 1]): label, x, y = values points[label] = (x, y) # Find out the final positions... positions_top = {} positions_left = {} positions_right = {} positions_bottom = {} # Find the actual positions for i, values in enumerate(labels_bottom): y, label = values margin = 1.2 / len(labels_bottom) positions_bottom[label] = (-0.05 + i * margin, -0.1,) for i, values in enumerate(labels_left): x, label = values margin = 1.2 / len(labels_left) positions_left[label] = (-0.1, 1.1 - i * margin) for i, values in enumerate(labels_top): y, label = values margin = 1.2 / len(labels_top) positions_top[label] = (-0.05 + i * margin, 1.1) for i, values in enumerate(labels_right): y, label = values margin = 1.2 / len(labels_right) positions_right[label] = (1.1, 1.05 - i * margin) # Do greedy resorting if it decreases the number of intersections... def resort(label_positions, marker_positions, maxdepth=1): # TODO: are the inputs dicts or lists # TODO: two-step look-ahead def intersect(start1, end1, start2, end2): # Compute if there is an intersection, for the algorithm see # Computer Graphics by F.S.Hill # If one vector is just a point, it cannot intersect with a line... 
for v in [start1, start2, end1, end2]: if not np.isfinite(v).all(): return False # Obviously there is no intersection def perpendicular(d): return np.array((-d[1], d[0])) d1 = end1 - start1 # denoted b d2 = end2 - start2 # denoted d d2_1 = start2 - start1 # denoted c d1_perp = perpendicular(d1) # denoted by b_perp d2_perp = perpendicular(d2) # denoted by d_perp t = np.dot(d2_1, d2_perp) / np.dot(d1, d2_perp) u = - np.dot(d2_1, d1_perp) / np.dot(d2, d1_perp) if 0 <= t <= 1 and 0 <= u <= 1: return True # There is an intersection else: return False # There is no intersection def number_of_intersections(label_positions, marker_positions): num = 0 for key1, key2 in itertools.permutations(label_positions, r=2): s1 = np.array(label_positions[key1]) e1 = np.array(marker_positions[key1]) s2 = np.array(label_positions[key2]) e2 = np.array(marker_positions[key2]) if intersect(s1, e1, s2, e2): num += 1 return num # test if swapping two lines would decrease the number of intersections # TODO: if this was done with a datastructure different than dicts, # it could be much faster, because there is a lot of redundant # computing performed in the second iteration def swap(label_positions, marker_positions, depth=0, maxdepth=maxdepth, best_found=sys.maxint): if len(label_positions) <= 1: return two_step_look_ahead = False while True: improvement = False for key1, key2 in itertools.combinations(label_positions, r=2): before = number_of_intersections(label_positions, marker_positions) # swap: tmp = label_positions[key1] label_positions[key1] = label_positions[key2] label_positions[key2] = tmp if depth < maxdepth and two_step_look_ahead: swap(label_positions, marker_positions, depth=depth+1, best_found=before) after = number_of_intersections(label_positions, marker_positions) if best_found > after and before > after: improvement = True print(before, after) print("Depth %d: Swapped %s with %s" % (depth, key1, key2)) else: # swap back... 
tmp = label_positions[key1] label_positions[key1] = label_positions[key2] label_positions[key2] = tmp if after == 0: break # If it is not yet sorted perfectly, do another pass with # two-step lookahead if before == 0: print("Sorted perfectly...") break print(depth, two_step_look_ahead) if two_step_look_ahead: break if maxdepth == depth: print("Reached maximum recursion depth...") break if not improvement and depth < maxdepth: print("Still %d errors, trying two-step lookahead" % before) two_step_look_ahead = True swap(label_positions, marker_positions, maxdepth=maxdepth) resort(positions_bottom, points, maxdepth=depth) resort(positions_left, points, maxdepth=depth) resort(positions_right, points, maxdepth=depth) resort(positions_top, points, maxdepth=depth) # Helper function def plot(x, y, label_x, label_y, label, ha, va, relpos, rotation=0): ax.scatter(x, y, marker='o', label=label, s=80, linewidths=0.1, color='blue', edgecolor='black') label = label.replace('larochelle_etal_2007_', '') x = ax.annotate(label, xy=(x, y), xytext=(label_x, label_y), ha=ha, va=va, rotation=rotation, bbox=dict(boxstyle='round', fc='gray', alpha=0.5), arrowprops=dict(arrowstyle='->', color='black', relpos=relpos)) # Do the plotting for i, key in enumerate(positions_bottom): x, y = positions_bottom[key] plot(points[key][0], points[key][1], x, y, key, ha='right', va='top', rotation=45, relpos=(1, 1)) for i, key in enumerate(positions_left): x, y = positions_left[key] plot(points[key][0], points[key][1], x, y, key, ha='right', va='top', rotation=45, relpos=(1, 1)) for i, key in enumerate(positions_top): x, y = positions_top[key] plot(points[key][0], points[key][1], x, y, key, ha='left', va='bottom', rotation=45, relpos=(0, 0)) for i, key in enumerate(positions_right): x, y = positions_right[key] plot(points[key][0], points[key][1], x, y, key, ha='left', va='bottom', rotation=45, relpos=(0, 0)) # Resize everything box = ax.get_position() remove = 0.05 * box.width ax.set_position([box.x0 + remove, box.y0 + remove, box.width - remove*2, box.height - remove*2]) locs_x = ax.get_xticks() locs_y = ax.get_yticks() ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_xlim((-0.1, 1.1)) ax.set_ylim((-0.1, 1.1)) plt.savefig(os.path.join(metafeatures_plot_dir, "pca.png")) plt.savefig(os.path.join(metafeatures_plot_dir, "pca.pdf")) plt.clf() # Relation of features to each other... 
#correlations = [] #for mf_1, mf_2 in itertools.combinations(metafeatures.columns, 2): # x = metafeatures.loc[:, mf_1] # y = metafeatures.loc[:, mf_2] # rho, p = scipy.stats.spearmanr(x, y) # correlations.append((rho, "%s-%s" % (mf_1, mf_2))) # plt.figure() # plt.plot(np.arange(0, 1, 0.01), np.arange(0, 1, 0.01)) # plt.plot(x, y, "x") # plt.xlabel(mf_1) # plt.ylabel(mf_2) # plt.xlim((0, 1)) # plt.ylim((0, 1)) # plt.savefig(os.path.join(target_directory, mf_1 + "__" + mf_2 + " # .png")) # plt.close() #correlations.sort() #for cor in correlations: #print cor if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--tasks", required=True, type=str) parser.add_argument("--runs", type=str) parser.add_argument("experiment_directory", type=str) parser.add_argument("-m", "--method", default='pca', choices=['pca', 't-sne'], help="Dimensionality reduction method") parser.add_argument("--distance", choices=[None, 'l1', 'l2', 'runs'], default='l2') parser.add_argument("-s", "--seed", default=1, type=int) parser.add_argument("-d", "--depth", default=0, type=int) parser.add_argument("--subset", default='all', choices=['all', 'pfahringer_2000_experiment1']) args = parser.parse_args() with open(args.tasks) as fh: task_files_list = fh.readlines() # Load all the experiment run data only if needed if args.distance == 'runs': with open(args.runs) as fh: experiments_file_list = fh.readlines() else: experiments_file_list = StringIO.StringIO() for i in range(len(task_files_list)): experiments_file_list.write("\n") experiments_file_list.seek(0) pyMetaLearn.data_repositories.openml.apiconnector.set_local_directory( args.experiment_directory) meta_base = MetaBase(task_files_list, experiments_file_list) metafeatures = meta_base.get_all_metafeatures_as_pandas( metafeature_subset=args.subset) metafeature_times = meta_base.get_all_metafeatures_times_as_pandas( metafeature_subset=args.subset) #if args.subset: # metafeatures = metafeatures.loc[:,subsets[args.subset]] # metafeature_times = metafeature_times.loc[:,subsets[args.subset]] runs = meta_base.get_all_runs() general_plot_directory = os.path.join(args.experiment_directory, "plots") try: os.mkdir(general_plot_directory) except: pass metafeatures_plot_dir = os.path.join(general_plot_directory, "metafeatures") try: os.mkdir(metafeatures_plot_dir) except: pass plot_metafeatures(metafeatures_plot_dir, metafeatures, metafeature_times, runs, method=args.method, seed=args.seed, depth=args.depth, distance=args.distance)
bsd-3-clause
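The projection step in the plotting script above is: min-max scale the metafeatures, project to 2-D, then min-max scale the projection so labels can be laid out on a [0, 1] canvas. The same three steps compactly, on random stand-in data, assuming no NaNs so plain min/max replaces the script's nanmin/nanmax:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(1).rand(30, 10)
X = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))

T = PCA(2).fit_transform(X)
T = (T - T.min(axis=0)) / (T.max(axis=0) - T.min(axis=0))
print(T.min(axis=0), T.max(axis=0))   # [0. 0.] and [1. 1.]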
yaojenkuo/BuildingMachineLearningSystemsWithPython
ch03/rel_post_20news.py
24
3903
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import sklearn.datasets import scipy as sp new_post = \ """Disk drive problems. Hi, I have a problem with my hard disk. After 1 year it is working only sporadically now. I tried to format it, but now it doesn't boot any more. Any ideas? Thanks. """ print("""\ Dear reader of the 1st edition of 'Building Machine Learning Systems with Python'! For the 2nd edition we introduced a couple of changes that will result into results that differ from the results in the 1st edition. E.g. we now fully rely on scikit's fetch_20newsgroups() instead of requiring you to download the data manually from MLCOMP. If you have any questions, please ask at http://www.twotoreal.com """) all_data = sklearn.datasets.fetch_20newsgroups(subset="all") print("Number of total posts: %i" % len(all_data.filenames)) # Number of total posts: 18846 groups = [ 'comp.graphics', 'comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x', 'sci.space'] train_data = sklearn.datasets.fetch_20newsgroups(subset="train", categories=groups) print("Number of training posts in tech groups:", len(train_data.filenames)) # Number of training posts in tech groups: 3529 labels = train_data.target num_clusters = 50 # sp.unique(labels).shape[0] import nltk.stem english_stemmer = nltk.stem.SnowballStemmer('english') from sklearn.feature_extraction.text import TfidfVectorizer class StemmedTfidfVectorizer(TfidfVectorizer): def build_analyzer(self): analyzer = super(TfidfVectorizer, self).build_analyzer() return lambda doc: (english_stemmer.stem(w) for w in analyzer(doc)) vectorizer = StemmedTfidfVectorizer(min_df=10, max_df=0.5, stop_words='english', decode_error='ignore' ) vectorized = vectorizer.fit_transform(train_data.data) num_samples, num_features = vectorized.shape print("#samples: %d, #features: %d" % (num_samples, num_features)) # samples: 3529, #features: 4712 from sklearn.cluster import KMeans km = KMeans(n_clusters=num_clusters, n_init=1, verbose=1, random_state=3) clustered = km.fit(vectorized) print("km.labels_=%s" % km.labels_) # km.labels_=[ 6 34 22 ..., 2 21 26] print("km.labels_.shape=%s" % km.labels_.shape) # km.labels_.shape=3529 from sklearn import metrics print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_)) # Homogeneity: 0.400 print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_)) # Completeness: 0.206 print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_)) # V-measure: 0.272 print("Adjusted Rand Index: %0.3f" % metrics.adjusted_rand_score(labels, km.labels_)) # Adjusted Rand Index: 0.064 print("Adjusted Mutual Information: %0.3f" % metrics.adjusted_mutual_info_score(labels, km.labels_)) # Adjusted Mutual Information: 0.197 print(("Silhouette Coefficient: %0.3f" % metrics.silhouette_score(vectorized, labels, sample_size=1000))) # Silhouette Coefficient: 0.006 new_post_vec = vectorizer.transform([new_post]) new_post_label = km.predict(new_post_vec)[0] similar_indices = (km.labels_ == new_post_label).nonzero()[0] similar = [] for i in similar_indices: dist = sp.linalg.norm((new_post_vec - vectorized[i]).toarray()) similar.append((dist, train_data.data[i])) similar = sorted(similar) print("Count similar: %i" % len(similar)) show_at_1 = similar[0] show_at_2 = similar[int(len(similar) / 10)] show_at_3 = 
similar[int(len(similar) / 2)] print("=== #1 ===") print(show_at_1) print() print("=== #2 ===") print(show_at_2) print() print("=== #3 ===") print(show_at_3)
mit
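The `StemmedTfidfVectorizer` pattern above wraps the parent's analyzer and stems every token it yields. The same pattern with a trivial stand-in stemmer so the sketch runs without nltk, and using the conventional `super(StemmedTfidfVectorizer, ...)` call rather than the book's `super(TfidfVectorizer, ...)` shortcut:

from sklearn.feature_extraction.text import TfidfVectorizer

def toy_stem(w):
    # crude plural stripping; purely illustrative, not a real stemmer
    return w[:-1] if w.endswith('s') else w

class StemmedTfidfVectorizer(TfidfVectorizer):
    def build_analyzer(self):
        analyzer = super(StemmedTfidfVectorizer, self).build_analyzer()
        return lambda doc: (toy_stem(w) for w in analyzer(doc))

vect = StemmedTfidfVectorizer()
vect.fit_transform(["disk disks", "drive drives"])
print(vect.get_feature_names())   # ['disk', 'drive'] -- plurals merged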
hitszxp/scikit-learn
sklearn/linear_model/tests/test_ransac.py
40
12814
import numpy as np from numpy.testing import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal from scipy import sparse from sklearn.utils.testing import assert_less from sklearn.linear_model import LinearRegression, RANSACRegressor from sklearn.linear_model.ransac import _dynamic_max_trials # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), 
np.zeros((100, 1))) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. 
and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y) if __name__ == "__main__": np.testing.run_module_suite()
bsd-3-clause
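A minimal usage sketch of the RANSACRegressor API exercised by the tests above. The synthetic data, thresholds, and variable names here are invented for illustration; the only assumption is a scikit-learn version where RANSACRegressor is exposed from sklearn.linear_model, as in the tests.

import numpy as np
from sklearn.linear_model import LinearRegression, RANSACRegressor

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 1))
y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=200)
y[:20] += 30.0  # corrupt a block of samples so RANSAC has outliers to reject

ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                         residual_threshold=5, random_state=0)
ransac.fit(X, y)
# inlier_mask_ is the boolean mask the tests above compare against
print("inliers kept:", ransac.inlier_mask_.sum(), "of", len(y))
print("recovered slope:", ransac.estimator_.coef_)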
ChanChiChoi/scikit-learn
sklearn/covariance/tests/test_covariance.py
142
11068
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Gael Varoquaux <gael.varoquaux@normalesup.org>
#         Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause

import numpy as np

from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
    ShrunkCovariance, shrunk_covariance, \
    LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas

X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape


def test_covariance():
    # Tests Covariance module on a simple dataset.
    # test covariance fit from data
    cov = EmpiricalCovariance()
    cov.fit(X)
    emp_cov = empirical_covariance(X)
    assert_array_almost_equal(emp_cov, cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(emp_cov), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='spectral'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, norm='frobenius'), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, scaling=False), 0)
    assert_almost_equal(
        cov.error_norm(emp_cov, squared=False), 0)
    assert_raises(NotImplementedError,
                  cov.error_norm, emp_cov, norm='foo')
    # Mahalanobis distances computation test
    mahal_dist = cov.mahalanobis(X)
    print(np.amin(mahal_dist), np.amax(mahal_dist))
    assert(np.amin(mahal_dist) > 0)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = EmpiricalCovariance()
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
    assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
    assert_almost_equal(
        cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)

    # test with one sample
    # FIXME I don't know what this test does
    X_1sample = np.arange(5)
    cov = EmpiricalCovariance()
    assert_warns(UserWarning, cov.fit, X_1sample)
    assert_array_almost_equal(cov.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test integer type
    X_integer = np.asarray([[0, 1], [1, 0]])
    result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
    assert_array_almost_equal(empirical_covariance(X_integer), result)

    # test centered case
    cov = EmpiricalCovariance(assume_centered=True)
    cov.fit(X)
    assert_array_equal(cov.location_, np.zeros(X.shape[1]))


def test_shrunk_covariance():
    # Tests ShrunkCovariance module on a simple dataset.
    # compare shrunk covariance obtained from data and from MLE estimate
    cov = ShrunkCovariance(shrinkage=0.5)
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
        cov.covariance_, 4)

    # same test with shrinkage not provided
    cov = ShrunkCovariance()
    cov.fit(X)
    assert_array_almost_equal(
        shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)

    # same test with shrinkage = 0 (<==> empirical_covariance)
    cov = ShrunkCovariance(shrinkage=0.)
    cov.fit(X)
    assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    cov = ShrunkCovariance(shrinkage=0.3)
    cov.fit(X_1d)
    assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
    cov.fit(X)
    assert(cov.precision_ is None)


def test_ledoit_wolf():
    # Tests LedoitWolf module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_centered)
    shrinkage_ = lw.shrinkage_
    score_ = lw.score(X_centered)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True),
                        shrinkage_)
    assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
                                              assume_centered=True,
                                              block_size=6),
                        shrinkage_)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_centered,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf(assume_centered=True)
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d,
                                                         assume_centered=True)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False, assume_centered=True)
    lw.fit(X_centered)
    assert_almost_equal(lw.score(X_centered), score_, 4)
    assert(lw.precision_ is None)

    # Same tests without assuming centered data
    # test shrinkage coeff on a simple data set
    lw = LedoitWolf()
    lw.fit(X)
    assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
    assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
    assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
    assert_almost_equal(lw.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    # compare estimates given by LW and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    lw = LedoitWolf()
    lw.fit(X_1d)
    lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
    assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
    assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)

    # test with one sample
    # FIXME I don't know what this test does
    X_1sample = np.arange(5)
    lw = LedoitWolf()
    assert_warns(UserWarning, lw.fit, X_1sample)
    assert_array_almost_equal(lw.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    lw = LedoitWolf(store_precision=False)
    lw.fit(X)
    assert_almost_equal(lw.score(X), score_, 4)
    assert(lw.precision_ is None)


def test_ledoit_wolf_large():
    # test that ledoit_wolf doesn't error on data that is wider than block_size
    rng = np.random.RandomState(0)
    # use a number of features that is larger than the block-size
    X = rng.normal(size=(10, 20))
    lw = LedoitWolf(block_size=10).fit(X)
    # check that covariance is about diagonal (random normal noise)
    assert_almost_equal(lw.covariance_, np.eye(20), 0)
    cov = lw.covariance_

    # check that the result is consistent with not splitting data into blocks.
    lw = LedoitWolf(block_size=25).fit(X)
    assert_almost_equal(lw.covariance_, cov)


def test_oas():
    # Tests OAS module on a simple dataset.
    # test shrinkage coeff on a simple data set
    X_centered = X - X.mean(axis=0)
    oa = OAS(assume_centered=True)
    oa.fit(X_centered)
    shrinkage_ = oa.shrinkage_
    score_ = oa.score(X_centered)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
                                                 assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
    scov.fit(X_centered)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS(assume_centered=True)
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False, assume_centered=True)
    oa.fit(X_centered)
    assert_almost_equal(oa.score(X_centered), score_, 4)
    assert(oa.precision_ is None)

    # Same tests without assuming centered data--------------------------------
    # test shrinkage coeff on a simple data set
    oa = OAS()
    oa.fit(X)
    assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
    assert_almost_equal(oa.score(X), score_, 4)
    # compare shrunk covariance obtained from data and from MLE estimate
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    # compare estimates given by OAS and ShrunkCovariance
    scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
    scov.fit(X)
    assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)

    # test with n_features = 1
    X_1d = X[:, 0].reshape((-1, 1))
    oa = OAS()
    oa.fit(X_1d)
    oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
    assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
    assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
    assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)

    # test with one sample
    # FIXME I don't know what this test does
    X_1sample = np.arange(5)
    oa = OAS()
    assert_warns(UserWarning, oa.fit, X_1sample)
    assert_array_almost_equal(oa.covariance_,
                              np.zeros(shape=(5, 5), dtype=np.float64))

    # test shrinkage coeff on a simple data set (without saving precision)
    oa = OAS(store_precision=False)
    oa.fit(X)
    assert_almost_equal(oa.score(X), score_, 4)
    assert(oa.precision_ is None)
bsd-3-clause
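A minimal sketch of the covariance estimators the file above tests, showing the relationship between the estimator classes and their function helpers. The synthetic data is invented for illustration; the API calls (fit, covariance_, shrinkage_, error_norm) are the ones exercised by the tests.

import numpy as np
from sklearn.covariance import EmpiricalCovariance, LedoitWolf, \
    empirical_covariance

rng = np.random.RandomState(0)
X = rng.normal(size=(60, 5))

emp = EmpiricalCovariance().fit(X)
lw = LedoitWolf().fit(X)

# The estimator attribute should match the function helper.
assert np.allclose(emp.covariance_, empirical_covariance(X))

# Ledoit-Wolf shrinks the empirical estimate toward a scaled identity.
print("shrinkage coefficient:", lw.shrinkage_)
print("error norm vs. empirical estimate:", lw.error_norm(emp.covariance_))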
AlexanderFabisch/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
46
2798
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess wether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent pipeline = Pipeline([ ('vect', TfidfVectorizer(min_df=3, max_df=0.95)), ('clf', LinearSVC(C=1000)), ]) # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. # Fit the pipeline on the training set using grid search for the parameters parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1) grid_search.fit(docs_train, y_train) # TASK: print the cross-validated scores for the each parameters set # explored by the grid search print(grid_search.grid_scores_) # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted y_predicted = grid_search.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show()
bsd-3-clause
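A compact sketch of the same vectorizer / classifier / grid-search pattern used in the exercise above, on an invented six-document corpus so it runs without the movie-review data. The documents and labels are made up for illustration; cv=2 is chosen only because the toy corpus is tiny.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC

docs = ["good movie", "great plot", "wonderful acting",
        "bad movie", "awful plot", "terrible acting"]
labels = [1, 1, 1, 0, 0, 0]

pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=1)),
    ('clf', LinearSVC(C=1000)),
])

# Search over unigrams vs. unigrams+bigrams, exactly as in the exercise.
grid = GridSearchCV(pipeline,
                    {'vect__ngram_range': [(1, 1), (1, 2)]},
                    cv=2)
grid.fit(docs, labels)
print(grid.best_params_)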
aabadie/scikit-learn
sklearn/utils/tests/test_testing.py
24
7902
import warnings
import unittest
import sys

from nose.tools import assert_raises

from sklearn.utils.testing import (
    _assert_less,
    _assert_greater,
    assert_less_equal,
    assert_greater_equal,
    assert_warns,
    assert_no_warnings,
    assert_equal,
    set_random_state,
    assert_raise_message,
    ignore_warnings)

from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

try:
    from nose.tools import assert_less

    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as scikit-learn's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)

except ImportError:
    pass

try:
    from nose.tools import assert_greater

    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as scikit-learn's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)

except ImportError:
    pass


def test_assert_less_equal():
    assert_less_equal(0, 1)
    assert_less_equal(1, 1)
    assert_raises(AssertionError, assert_less_equal, 1, 0)


def test_assert_greater_equal():
    assert_greater_equal(1, 0)
    assert_greater_equal(1, 1)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)


def test_set_random_state():
    lda = LinearDiscriminantAnalysis()
    tree = DecisionTreeClassifier()
    # Linear Discriminant Analysis doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)


def test_assert_raise_message():
    def _raise_ValueError(message):
        raise ValueError(message)

    def _no_raise():
        pass

    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")

    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")

    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")

    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)

    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)


def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager
    # work as expected
    def _warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)

    def _multiple_warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
        warnings.warn("deprecation warning")

    # Check the function directly
    assert_no_warnings(ignore_warnings(_warning_function))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=DeprecationWarning))
    assert_warns(DeprecationWarning,
                 ignore_warnings(_warning_function, category=UserWarning))
    assert_warns(UserWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=DeprecationWarning))
    assert_warns(DeprecationWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=UserWarning))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=(DeprecationWarning,
                                                 UserWarning)))

    # Check the decorator
    @ignore_warnings
    def decorator_no_warning():
        _warning_function()
        _multiple_warning_function()

    @ignore_warnings(category=(DeprecationWarning, UserWarning))
    def decorator_no_warning_multiple():
        _multiple_warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_warning():
        _warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_warning():
        _warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_multiple_warning():
        _multiple_warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_multiple_warning():
        _multiple_warning_function()

    assert_no_warnings(decorator_no_warning)
    assert_no_warnings(decorator_no_warning_multiple)
    assert_no_warnings(decorator_no_deprecation_warning)
    assert_warns(DeprecationWarning, decorator_no_user_warning)
    assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)

    # Check the context manager
    def context_manager_no_warning():
        with ignore_warnings():
            _warning_function()

    def context_manager_no_warning_multiple():
        with ignore_warnings(category=(DeprecationWarning, UserWarning)):
            _multiple_warning_function()

    def context_manager_no_deprecation_warning():
        with ignore_warnings(category=DeprecationWarning):
            _warning_function()

    def context_manager_no_user_warning():
        with ignore_warnings(category=UserWarning):
            _warning_function()

    def context_manager_no_deprecation_multiple_warning():
        with ignore_warnings(category=DeprecationWarning):
            _multiple_warning_function()

    def context_manager_no_user_multiple_warning():
        with ignore_warnings(category=UserWarning):
            _multiple_warning_function()

    assert_no_warnings(context_manager_no_warning)
    assert_no_warnings(context_manager_no_warning_multiple)
    assert_no_warnings(context_manager_no_deprecation_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_warning)
    assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)


# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)

        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters

        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
bsd-3-clause
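A minimal sketch of the warning-testing helpers exercised above, assuming a scikit-learn version where sklearn.utils.testing still exposes them (they are nose-era utilities). The noisy function is invented for illustration; note that assert_warns both checks the warning category and passes through the function's return value, as the tests above rely on.

import warnings
from sklearn.utils.testing import assert_warns, assert_no_warnings, \
    ignore_warnings

def noisy():
    warnings.warn("watch out", UserWarning)
    return 42

# assert_warns checks the category and returns the wrapped result.
assert assert_warns(UserWarning, noisy) == 42

# Wrapping with ignore_warnings silences the matching category entirely.
assert_no_warnings(ignore_warnings(noisy, category=UserWarning))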
chaluemwut/fbserver
venv/lib/python2.7/site-packages/sklearn/feature_extraction/text.py
1
49725
# -*- coding: utf-8 -*- # Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # Robert Layton <robertlayton@gmail.com> # Jochen Wersdörfer <jochen@wersdoerfer.de> # Roman Sinayev <roman.sinayev@gmail.com> # # License: BSD 3 clause """ The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to build feature vectors from text documents. """ from __future__ import unicode_literals import array from collections import Mapping, defaultdict import numbers from operator import itemgetter import re import unicodedata import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, TransformerMixin from ..externals.six.moves import xrange from ..preprocessing import normalize from .hashing import FeatureHasher from .stop_words import ENGLISH_STOP_WORDS from ..utils import deprecated from ..externals import six __all__ = ['CountVectorizer', 'ENGLISH_STOP_WORDS', 'TfidfTransformer', 'TfidfVectorizer', 'strip_accents_ascii', 'strip_accents_unicode', 'strip_tags'] def strip_accents_unicode(s): """Transform accentuated unicode symbols into their simple counterpart Warning: the python-level loop and join operations make this implementation 20 times slower than the strip_accents_ascii basic normalization. See also -------- strip_accents_ascii Remove accentuated char for any unicode symbol that has a direct ASCII equivalent. """ return ''.join([c for c in unicodedata.normalize('NFKD', s) if not unicodedata.combining(c)]) def strip_accents_ascii(s): """Transform accentuated unicode symbols into ascii or nothing Warning: this solution is only suited for languages that have a direct transliteration to ASCII symbols. See also -------- strip_accents_unicode Remove accentuated char for any unicode symbol. """ nkfd_form = unicodedata.normalize('NFKD', s) return nkfd_form.encode('ASCII', 'ignore').decode('ASCII') def strip_tags(s): """Basic regexp based HTML / XML tag stripper function For serious HTML/XML preprocessing you should rather use an external library such as lxml or BeautifulSoup. """ return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s) def _check_stop_list(stop): if stop == "english": return ENGLISH_STOP_WORDS elif isinstance(stop, six.string_types): raise ValueError("not a built-in stop list: %s" % stop) else: # assume it's a collection return stop class VectorizerMixin(object): """Provides common code for text vectorizers (tokenization logic).""" _white_spaces = re.compile(r"\s\s+") def decode(self, doc): """Decode the input into a string of unicode symbols The decoding strategy depends on the vectorizer parameters. 
""" if self.input == 'filename': with open(doc, 'rb') as fh: doc = fh.read() elif self.input == 'file': doc = doc.read() if isinstance(doc, bytes): doc = doc.decode(self.encoding, self.decode_error) if doc is np.nan: raise ValueError("np.nan is an invalid document, expected byte or unicode string.") return doc def _word_ngrams(self, tokens, stop_words=None): """Turn tokens into a sequence of n-grams after stop words filtering""" # handle stop words if stop_words is not None: tokens = [w for w in tokens if w not in stop_words] # handle token n-grams min_n, max_n = self.ngram_range if max_n != 1: original_tokens = tokens tokens = [] n_original_tokens = len(original_tokens) for n in xrange(min_n, min(max_n + 1, n_original_tokens + 1)): for i in xrange(n_original_tokens - n + 1): tokens.append(" ".join(original_tokens[i: i + n])) return tokens def _char_ngrams(self, text_document): """Tokenize text_document into a sequence of character n-grams""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) text_len = len(text_document) ngrams = [] min_n, max_n = self.ngram_range for n in xrange(min_n, min(max_n + 1, text_len + 1)): for i in xrange(text_len - n + 1): ngrams.append(text_document[i: i + n]) return ngrams def _char_wb_ngrams(self, text_document): """Whitespace sensitive char-n-gram tokenization. Tokenize text_document into a sequence of character n-grams excluding any whitespace (operating only inside word boundaries)""" # normalize white spaces text_document = self._white_spaces.sub(" ", text_document) min_n, max_n = self.ngram_range ngrams = [] for w in text_document.split(): w = ' ' + w + ' ' w_len = len(w) for n in xrange(min_n, max_n + 1): offset = 0 ngrams.append(w[offset:offset + n]) while offset + n < w_len: offset += 1 ngrams.append(w[offset:offset + n]) if offset == 0: # count a short word (w_len < n) only once break return ngrams def build_preprocessor(self): """Return a function to preprocess the text before tokenization""" if self.preprocessor is not None: return self.preprocessor # unfortunately python functools package does not have an efficient # `compose` function that would have allowed us to chain a dynamic # number of functions. However the cost of a lambda call is a few # hundreds of nanoseconds which is negligible when compared to the # cost of tokenizing a string of 1000 chars for instance. 
noop = lambda x: x # accent stripping if not self.strip_accents: strip_accents = noop elif callable(self.strip_accents): strip_accents = self.strip_accents elif self.strip_accents == 'ascii': strip_accents = strip_accents_ascii elif self.strip_accents == 'unicode': strip_accents = strip_accents_unicode else: raise ValueError('Invalid value for "strip_accents": %s' % self.strip_accents) if self.lowercase: return lambda x: strip_accents(x.lower()) else: return strip_accents def build_tokenizer(self): """Return a function that splits a string into a sequence of tokens""" if self.tokenizer is not None: return self.tokenizer token_pattern = re.compile(self.token_pattern) return lambda doc: token_pattern.findall(doc) def get_stop_words(self): """Build or fetch the effective stop words list""" return _check_stop_list(self.stop_words) def build_analyzer(self): """Return a callable that handles preprocessing and tokenization""" if callable(self.analyzer): return self.analyzer preprocess = self.build_preprocessor() if self.analyzer == 'char': return lambda doc: self._char_ngrams(preprocess(self.decode(doc))) elif self.analyzer == 'char_wb': return lambda doc: self._char_wb_ngrams( preprocess(self.decode(doc))) elif self.analyzer == 'word': stop_words = self.get_stop_words() tokenize = self.build_tokenizer() return lambda doc: self._word_ngrams( tokenize(preprocess(self.decode(doc))), stop_words) else: raise ValueError('%s is not a valid tokenization scheme/analyzer' % self.analyzer) def _check_vocabulary(self): vocabulary = self.vocabulary if vocabulary is not None: if not isinstance(vocabulary, Mapping): vocab = {} for i, t in enumerate(vocabulary): if vocab.setdefault(t, i) != i: msg = "Duplicate term in vocabulary: %r" % t raise ValueError(msg) vocabulary = vocab else: indices = set(six.itervalues(vocabulary)) if len(indices) != len(vocabulary): raise ValueError("Vocabulary contains repeated indices.") for i in xrange(len(vocabulary)): if i not in indices: msg = ("Vocabulary of size %d doesn't contain index " "%d." % (len(vocabulary), i)) raise ValueError(msg) if not vocabulary: raise ValueError("empty vocabulary passed to fit") self.fixed_vocabulary_ = True self.vocabulary_ = dict(vocabulary) else: self.fixed_vocabulary_ = False @property @deprecated("The `fixed_vocabulary` attribute is deprecated and will be " "removed in 0.18. Please use `fixed_vocabulary_` instead.") def fixed_vocabulary(self): return self.fixed_vocabulary_ class HashingVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token occurrences It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts (or binary occurrence information), possibly normalized as token frequencies if norm='l1' or projected on the euclidean unit sphere if norm='l2'. This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping. This strategy has several advantages: - it is very low memory scalable to large datasets as there is no need to store a vocabulary dictionary in memory - it is fast to pickle and un-pickle as it holds no state besides the constructor parameters - it can be used in a streaming (partial fit) or parallel pipeline as there is no state computed during fit. 
There are also a couple of cons (vs using a CountVectorizer with an in-memory vocabulary): - there is no way to compute the inverse transform (from feature indices to string feature names) which can be a problem when trying to introspect which features are most important to a model. - there can be collisions: distinct tokens can be mapped to the same feature index. However in practice this is rarely an issue if n_features is large enough (e.g. 2 ** 18 for text classification problems). - no IDF weighting as this would render the transformer stateful. The hash function employed is the signed 32-bit version of Murmurhash3. Parameters ---------- input: string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents: {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer: string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor: callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer: callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range: tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words: string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. lowercase: boolean, default True Convert all characters to lowercase before tokenizing. token_pattern: string Regular expression denoting what constitutes a "token", only used if `analyzer == 'word'`. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). n_features : integer, optional, (2 ** 20) by default The number of features (columns) in the output matrices. Small numbers of features are likely to cause hash collisions, but large numbers will cause larger coefficient dimensions in linear learners. norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. binary: boolean, False by default. 
If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype: type, optional Type of the matrix returned by fit_transform() or transform(). non_negative : boolean, optional Whether output matrices should contain non-negative values only; effectively calls abs on the matrix prior to returning it. When True, output values can be interpreted as frequencies. When False, output values will have expected value zero. See also -------- CountVectorizer, TfidfVectorizer """ def __init__(self, input='content', charset=None, encoding='utf-8', decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20), binary=False, norm='l2', non_negative=False, dtype=np.float64): self.input = input self.encoding = encoding self.decode_error = decode_error if charset is not None: warnings.warn("The charset parameter is deprecated as of version " "0.14 and will be removed in 0.16. Use encoding " "instead.", DeprecationWarning) self.encoding = charset if charset_error is not None: warnings.warn("The charset_error parameter is deprecated as of " "version 0.14 and will be removed in 0.16. Use " "decode_error instead.", DeprecationWarning) self.decode_error = charset_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.n_features = n_features self.ngram_range = ngram_range self.binary = binary self.norm = norm self.non_negative = non_negative self.dtype = dtype def partial_fit(self, X, y=None): """Does nothing: this transformer is stateless. This method is just there to mark the fact that this transformer can work in a streaming setup. """ return self def fit(self, X, y=None): """Does nothing: this transformer is stateless.""" # triggers a parameter validation self._get_hasher().fit(X, y=y) return self def transform(self, X, y=None): """Transform a sequence of documents to a document-term matrix. Parameters ---------- X : iterable over raw text documents, length = n_samples Samples. Each sample must be a text document (either bytes or unicode strings, file name or file object depending on the constructor argument) which will be tokenized and hashed. y : (ignored) Returns ------- X : scipy.sparse matrix, shape = (n_samples, self.n_features) Document-term matrix. """ analyzer = self.build_analyzer() X = self._get_hasher().transform(analyzer(doc) for doc in X) if self.binary: X.data.fill(1) if self.norm is not None: X = normalize(X, norm=self.norm, copy=False) return X # Alias transform to fit_transform for convenience fit_transform = transform def _get_hasher(self): return FeatureHasher(n_features=self.n_features, input_type='string', dtype=self.dtype, non_negative=self.non_negative) def _document_frequency(X): """Count the number of non-zero values for each feature in sparse X.""" if sp.isspmatrix_csr(X): return np.bincount(X.indices, minlength=X.shape[1]) else: return np.diff(sp.csc_matrix(X, copy=False).indptr) class CountVectorizer(BaseEstimator, VectorizerMixin): """Convert a collection of text documents to a matrix of token counts This implementation produces a sparse representation of the counts using scipy.sparse.coo_matrix. 
If you do not provide an a-priori dictionary and you do not use an analyzer that does some kind of feature selection then the number of features will be equal to the vocabulary size found by analyzing the data. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char', 'char_wb'} or callable Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If 'english', a built-in stop word list for English is used. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, True by default Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if `tokenize == 'word'`. The default regexp select tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, optional, 1 by default When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. 
If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : optional, None by default If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. Indices in the mapping should not be repeated and should not have any gap between 0 and the largest index. binary : boolean, False by default. If True, all non zero counts are set to 1. This is useful for discrete probabilistic models that model binary events rather than integer counts. dtype : type, optional Type of the matrix returned by fit_transform() or transform(). Attributes ---------- `vocabulary_` : dict A mapping of terms to feature indices. `stop_words_` : set Terms that were ignored because they occurred in either too many (`max_df`) or in too few (`min_df`) documents. This is only available if no vocabulary was given. See also -------- HashingVectorizer, TfidfVectorizer """ def __init__(self, input='content', encoding='utf-8', charset=None, decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), analyzer='word', max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64): self.input = input self.encoding = encoding self.decode_error = decode_error if charset is not None: warnings.warn("The charset parameter is deprecated as of version " "0.14 and will be removed in 0.16. Use encoding " "instead.", DeprecationWarning) self.encoding = charset if charset_error is not None: warnings.warn("The charset_error parameter is deprecated as of " "version 0.14 and will be removed in 0.16. Use " "decode_error instead.", DeprecationWarning) self.decode_error = charset_error self.strip_accents = strip_accents self.preprocessor = preprocessor self.tokenizer = tokenizer self.analyzer = analyzer self.lowercase = lowercase self.token_pattern = token_pattern self.stop_words = stop_words self.max_df = max_df self.min_df = min_df if max_df < 0 or min_df < 0: raise ValueError("negative value for max_df of min_df") self.max_features = max_features if max_features is not None: if (not isinstance(max_features, numbers.Integral) or max_features <= 0): raise ValueError( "max_features=%r, neither a positive integer nor None" % max_features) self.ngram_range = ngram_range self.vocabulary = vocabulary self.binary = binary self.dtype = dtype def _sort_features(self, X, vocabulary): """Sort features by name Returns a reordered matrix and modifies the vocabulary in place """ sorted_features = sorted(six.iteritems(vocabulary)) map_index = np.empty(len(sorted_features), dtype=np.int32) for new_val, (term, old_val) in enumerate(sorted_features): map_index[new_val] = old_val vocabulary[term] = new_val return X[:, map_index] def _limit_features(self, X, vocabulary, high=None, low=None, limit=None): """Remove too rare or too common features. Prune features that are non zero in more samples than high or less documents than low, modifying the vocabulary, and restricting it to at most the limit most frequent. This does not prune samples with zero features. 
""" if high is None and low is None and limit is None: return X, set() # Calculate a mask based on document frequencies dfs = _document_frequency(X) tfs = np.asarray(X.sum(axis=0)).ravel() mask = np.ones(len(dfs), dtype=bool) if high is not None: mask &= dfs <= high if low is not None: mask &= dfs >= low if limit is not None and mask.sum() > limit: mask_inds = (-tfs[mask]).argsort()[:limit] new_mask = np.zeros(len(dfs), dtype=bool) new_mask[np.where(mask)[0][mask_inds]] = True mask = new_mask new_indices = np.cumsum(mask) - 1 # maps old indices to new removed_terms = set() for term, old_index in list(six.iteritems(vocabulary)): if mask[old_index]: vocabulary[term] = new_indices[old_index] else: del vocabulary[term] removed_terms.add(term) kept_indices = np.where(mask)[0] if len(kept_indices) == 0: raise ValueError("After pruning, no terms remain. Try a lower" " min_df or a higher max_df.") return X[:, kept_indices], removed_terms def _count_vocab(self, raw_documents, fixed_vocab): """Create sparse feature matrix, and vocabulary where fixed_vocab=False """ if fixed_vocab: vocabulary = self.vocabulary_ else: # Add a new value when a new vocabulary item is seen vocabulary = defaultdict() vocabulary.default_factory = vocabulary.__len__ analyze = self.build_analyzer() j_indices = _make_int_array() indptr = _make_int_array() indptr.append(0) for doc in raw_documents: for feature in analyze(doc): try: j_indices.append(vocabulary[feature]) except KeyError: # Ignore out-of-vocabulary items for fixed_vocab=True continue indptr.append(len(j_indices)) if not fixed_vocab: # disable defaultdict behaviour vocabulary = dict(vocabulary) if not vocabulary: raise ValueError("empty vocabulary; perhaps the documents only" " contain stop words") # some Python/Scipy versions won't accept an array.array: if j_indices: j_indices = np.frombuffer(j_indices, dtype=np.intc) else: j_indices = np.array([], dtype=np.int32) indptr = np.frombuffer(indptr, dtype=np.intc) values = np.ones(len(j_indices)) X = sp.csr_matrix((values, j_indices, indptr), shape=(len(indptr) - 1, len(vocabulary)), dtype=self.dtype) X.sum_duplicates() return vocabulary, X def fit(self, raw_documents, y=None): """Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- self """ self.fit_transform(raw_documents) return self def fit_transform(self, raw_documents, y=None): """Learn the vocabulary dictionary and return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : array, [n_samples, n_features] Document-term matrix. """ # We intentionally don't call the transform method to make # fit_transform overridable without unwanted side effects in # TfidfVectorizer. 
self._check_vocabulary() max_df = self.max_df min_df = self.min_df max_features = self.max_features vocabulary, X = self._count_vocab(raw_documents, self.fixed_vocabulary_) if self.binary: X.data.fill(1) if not self.fixed_vocabulary_: X = self._sort_features(X, vocabulary) n_doc = X.shape[0] max_doc_count = (max_df if isinstance(max_df, numbers.Integral) else max_df * n_doc) min_doc_count = (min_df if isinstance(min_df, numbers.Integral) else min_df * n_doc) if max_doc_count < min_doc_count: raise ValueError( "max_df corresponds to < documents than min_df") X, self.stop_words_ = self._limit_features(X, vocabulary, max_doc_count, min_doc_count, max_features) self.vocabulary_ = vocabulary return X def transform(self, raw_documents): """Transform documents to document-term matrix. Extract token counts out of raw text documents using the vocabulary fitted with fit or the one provided to the constructor. Parameters ---------- raw_documents : iterable An iterable which yields either str, unicode or file objects. Returns ------- X : sparse matrix, [n_samples, n_features] Document-term matrix. """ if not hasattr(self, 'vocabulary_'): self._check_vocabulary() if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") # use the same matrix-building strategy as fit_transform _, X = self._count_vocab(raw_documents, fixed_vocab=True) if self.binary: X.data.fill(1) return X def inverse_transform(self, X): """Return terms per document with nonzero entries in X. Parameters ---------- X : {array, sparse matrix}, shape = [n_samples, n_features] Returns ------- X_inv : list of arrays, len = n_samples List of arrays of terms. """ if sp.issparse(X): # We need CSR format for fast row manipulations. X = X.tocsr() else: # We need to convert X to a matrix, so that the indexing # returns 2D objects X = np.asmatrix(X) n_samples = X.shape[0] terms = np.array(list(self.vocabulary_.keys())) indices = np.array(list(self.vocabulary_.values())) inverse_vocabulary = terms[np.argsort(indices)] return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel() for i in range(n_samples)] def get_feature_names(self): """Array mapping from feature integer indices to feature name""" if not hasattr(self, 'vocabulary_') or len(self.vocabulary_) == 0: raise ValueError("Vocabulary wasn't fitted or is empty!") return [t for t, i in sorted(six.iteritems(self.vocabulary_), key=itemgetter(1))] def _make_int_array(): """Construct an array.array of a type suitable for scipy.sparse indices.""" return array.array(str("i")) class TfidfTransformer(BaseEstimator, TransformerMixin): """Transform a count matrix to a normalized tf or tf-idf representation Tf means term-frequency while tf-idf means term-frequency times inverse document-frequency. This is a common term weighting scheme in information retrieval, that has also found good use in document classification. The goal of using tf-idf instead of the raw frequencies of occurrence of a token in a given document is to scale down the impact of tokens that occur very frequently in a given corpus and that are hence empirically less informative than features that occur in a small fraction of the training corpus. The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf, instead of tf * idf. The effect of this is that terms with zero idf, i.e. that occur in all documents of a training set, will not be entirely ignored. 
The formulas used to compute tf and idf depend on parameter settings that correspond to the SMART notation used in IR, as follows: Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True. Idf is "t" when use_idf is given, "n" (none) otherwise. Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None. Parameters ---------- norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, optional Enable inverse-document-frequency reweighting. smooth_idf : boolean, optional Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, optional Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). References ---------- .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern Information Retrieval. Addison Wesley, pp. 68-74.` .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to Information Retrieval. Cambridge University Press, pp. 118-120.` """ def __init__(self, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): self.norm = norm self.use_idf = use_idf self.smooth_idf = smooth_idf self.sublinear_tf = sublinear_tf def fit(self, X, y=None): """Learn the idf vector (global term weights) Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts """ if not sp.issparse(X): X = sp.csc_matrix(X) if self.use_idf: n_samples, n_features = X.shape df = _document_frequency(X) # perform idf smoothing if required df += int(self.smooth_idf) n_samples += int(self.smooth_idf) # log1p instead of log makes sure terms with zero idf don't get # suppressed entirely idf = np.log(float(n_samples) / df) + 1.0 self._idf_diag = sp.spdiags(idf, diags=0, m=n_features, n=n_features) return self def transform(self, X, copy=True): """Transform a count matrix to a tf or tf-idf representation Parameters ---------- X : sparse matrix, [n_samples, n_features] a matrix of term/token counts Returns ------- vectors : sparse matrix, [n_samples, n_features] """ if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float): # preserve float family dtype X = sp.csr_matrix(X, copy=copy) else: # convert counts or binary occurrences to floats X = sp.csr_matrix(X, dtype=np.float64, copy=copy) n_samples, n_features = X.shape if self.sublinear_tf: np.log(X.data, X.data) X.data += 1 if self.use_idf: if not hasattr(self, "_idf_diag"): raise ValueError("idf vector not fitted") expected_n_features = self._idf_diag.shape[0] if n_features != expected_n_features: raise ValueError("Input has n_features=%d while the model" " has been trained with n_features=%d" % ( n_features, expected_n_features)) # *= doesn't work X = X * self._idf_diag if self.norm: X = normalize(X, norm=self.norm, copy=False) return X @property def idf_(self): if hasattr(self, "_idf_diag"): return np.ravel(self._idf_diag.sum(axis=0)) else: return None class TfidfVectorizer(CountVectorizer): """Convert a collection of raw documents to a matrix of TF-IDF features. Equivalent to CountVectorizer followed by TfidfTransformer. Parameters ---------- input : string {'filename', 'file', 'content'} If 'filename', the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. If 'file', the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. 
Otherwise the input is expected to be the sequence strings or bytes items are expected to be analyzed directly. encoding : string, 'utf-8' by default. If bytes or files are given to analyze, this encoding is used to decode. decode_error : {'strict', 'ignore', 'replace'} Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. strip_accents : {'ascii', 'unicode', None} Remove accents during the preprocessing step. 'ascii' is a fast method that only works on characters that have an direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) does nothing. analyzer : string, {'word', 'char'} or callable Whether the feature should be made of word or character n-grams. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. preprocessor : callable or None (default) Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. tokenizer : callable or None (default) Override the string tokenization step while preserving the preprocessing and n-grams generation steps. ngram_range : tuple (min_n, max_n) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. stop_words : string {'english'}, list, or None (default) If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. If None, no stop words will be used. max_df can be set to a value in the range [0.7, 1.0) to automatically detect and filter stop words based on intra corpus document frequency of terms. lowercase : boolean, default True Convert all characters to lowercase before tokenizing. token_pattern : string Regular expression denoting what constitutes a "token", only used if `analyzer == 'word'`. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default When building the vocabulary ignore terms that have a term frequency strictly higher than the given threshold (corpus specific stop words). If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. min_df : float in range [0.0, 1.0] or int, optional, 1 by default When building the vocabulary ignore terms that have a term frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. max_features : optional, None by default If not None, build a vocabulary that only consider the top max_features ordered by term frequency across the corpus. This parameter is ignored if vocabulary is not None. vocabulary : Mapping or iterable, optional Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. binary : boolean, False by default. 
If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set idf and normalization to False to get 0/1 outputs.) dtype : type, optional Type of the matrix returned by fit_transform() or transform(). norm : 'l1', 'l2' or None, optional Norm used to normalize term vectors. None for no normalization. use_idf : boolean, optional Enable inverse-document-frequency reweighting. smooth_idf : boolean, optional Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. sublinear_tf : boolean, optional Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). Attributes ---------- ``idf_`` : array, shape = [n_features], or None The learned idf vector (global term weights) when ``use_idf`` is set to True, None otherwise. See also -------- CountVectorizer Tokenize the documents and count the occurrences of token and return them as a sparse matrix TfidfTransformer Apply Term Frequency Inverse Document Frequency normalization to a sparse matrix of occurrence counts. """ def __init__(self, input='content', encoding='utf-8', charset=None, decode_error='strict', charset_error=None, strip_accents=None, lowercase=True, preprocessor=None, tokenizer=None, analyzer='word', stop_words=None, token_pattern=r"(?u)\b\w\w+\b", ngram_range=(1, 1), max_df=1.0, min_df=1, max_features=None, vocabulary=None, binary=False, dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=False): super(TfidfVectorizer, self).__init__( input=input, charset=charset, charset_error=charset_error, encoding=encoding, decode_error=decode_error, strip_accents=strip_accents, lowercase=lowercase, preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer, stop_words=stop_words, token_pattern=token_pattern, ngram_range=ngram_range, max_df=max_df, min_df=min_df, max_features=max_features, vocabulary=vocabulary, binary=binary, dtype=dtype) self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf) # Broadcast the TF-IDF parameters to the underlying transformer instance # for easy grid search and repr @property def norm(self): return self._tfidf.norm @norm.setter def norm(self, value): self._tfidf.norm = value @property def use_idf(self): return self._tfidf.use_idf @use_idf.setter def use_idf(self, value): self._tfidf.use_idf = value @property def smooth_idf(self): return self._tfidf.smooth_idf @smooth_idf.setter def smooth_idf(self, value): self._tfidf.smooth_idf = value @property def sublinear_tf(self): return self._tfidf.sublinear_tf @sublinear_tf.setter def sublinear_tf(self, value): self._tfidf.sublinear_tf = value @property def idf_(self): return self._tfidf.idf_ def fit(self, raw_documents, y=None): """Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- self : TfidfVectorizer """ X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) return self def fit_transform(self, raw_documents, y=None): """Learn vocabulary and idf, return term-document matrix. This is equivalent to fit followed by transform, but more efficiently implemented. Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. 
""" X = super(TfidfVectorizer, self).fit_transform(raw_documents) self._tfidf.fit(X) # X is already a transformed view of raw_documents so # we set copy to False return self._tfidf.transform(X, copy=False) def transform(self, raw_documents, copy=True): """Transform documents to document-term matrix. Uses the vocabulary and document frequencies (df) learned by fit (or fit_transform). Parameters ---------- raw_documents : iterable an iterable which yields either str, unicode or file objects Returns ------- X : sparse matrix, [n_samples, n_features] Tf-idf-weighted document-term matrix. """ X = super(TfidfVectorizer, self).transform(raw_documents) return self._tfidf.transform(X, copy=False)
apache-2.0
raghavrv/scikit-learn
sklearn/neighbors/tests/test_approximate.py
12
20126
""" Testing for the approximate neighbor search using Locality Sensitive Hashing Forest module (sklearn.neighbors.LSHForest). """ # Author: Maheshakya Wijewardena, Joel Nothman import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_array_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import ignore_warnings from sklearn.metrics.pairwise import pairwise_distances from sklearn.neighbors import LSHForest from sklearn.neighbors import NearestNeighbors def test_lsh_forest_deprecation(): assert_warns_message(DeprecationWarning, "LSHForest has poor performance and has been " "deprecated in 0.19. It will be removed " "in version 0.21.", LSHForest) def test_neighbors_accuracy_with_n_candidates(): # Checks whether accuracy increases as `n_candidates` increases. n_candidates_values = np.array([.1, 50, 500]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_candidates_values.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, n_candidates in enumerate(n_candidates_values): lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_candidates=n_candidates) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies print('accuracies:', accuracies) assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") def test_neighbors_accuracy_with_n_estimators(): # Checks whether accuracy increases as `n_estimators` increases. 
n_estimators = np.array([1, 10, 100]) n_samples = 100 n_features = 10 n_iter = 10 n_points = 5 rng = np.random.RandomState(42) accuracies = np.zeros(n_estimators.shape[0], dtype=float) X = rng.rand(n_samples, n_features) for i, t in enumerate(n_estimators): lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_candidates=500, n_estimators=t) ignore_warnings(lshf.fit)(X) for j in range(n_iter): query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_points, return_distance=False) distances = pairwise_distances(query, X, metric='cosine') ranks = np.argsort(distances)[0, :n_points] intersection = np.intersect1d(ranks, neighbors).shape[0] ratio = intersection / float(n_points) accuracies[i] = accuracies[i] + ratio accuracies[i] = accuracies[i] / float(n_iter) # Sorted accuracies should be equal to original accuracies assert_true(np.all(np.diff(accuracies) >= 0), msg="Accuracies are not non-decreasing.") # Highest accuracy should be strictly greater than the lowest assert_true(np.ptp(accuracies) > 0, msg="Highest accuracy is not strictly greater than lowest.") @ignore_warnings def test_kneighbors(): # Checks whether desired number of neighbors are returned. # It is guaranteed to return the requested number of neighbors # if `min_hash_match` is set to 0. Returned distances should be # in ascending order. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0) # Test unfitted estimator assert_raises(ValueError, lshf.kneighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=False) # Desired number of neighbors should be returned. assert_equal(neighbors.shape[1], n_neighbors) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=True) assert_equal(neighbors.shape[0], n_queries) assert_equal(distances.shape[0], n_queries) # Test only neighbors neighbors = lshf.kneighbors(queries, n_neighbors=1, return_distance=False) assert_equal(neighbors.shape[0], n_queries) # Test random point(not in the data set) query = rng.randn(n_features).reshape(1, -1) lshf.kneighbors(query, n_neighbors=1, return_distance=False) # Test n_neighbors at initialization neighbors = lshf.kneighbors(query, return_distance=False) assert_equal(neighbors.shape[1], 5) # Test `neighbors` has an integer dtype assert_true(neighbors.dtype.kind == 'i', msg="neighbors are not in integer dtype.") def test_radius_neighbors(): # Checks whether Returned distances are less than `radius` # At least one point should be returned when the `radius` is set # to mean distance from the considering point to other points in # the database. # Moreover, this test compares the radius neighbors of LSHForest # with the `sklearn.neighbors.NearestNeighbors`. 
n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() # Test unfitted estimator assert_raises(ValueError, lshf.radius_neighbors, X[0]) ignore_warnings(lshf.fit)(X) for i in range(n_iter): # Select a random point in the dataset as the query query = X[rng.randint(0, n_samples)].reshape(1, -1) # At least one neighbor should be returned when the radius is the # mean distance from the query to the points of the dataset. mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=False) assert_equal(neighbors.shape, (1,)) assert_equal(neighbors.dtype, object) assert_greater(neighbors[0].shape[0], 0) # All distances to points in the results of the radius query should # be less than mean_dist distances, neighbors = lshf.radius_neighbors(query, radius=mean_dist, return_distance=True) assert_array_less(distances[0], mean_dist) # Multiple points n_queries = 5 queries = X[rng.randint(0, n_samples, n_queries)] distances, neighbors = lshf.radius_neighbors(queries, return_distance=True) # dists and inds should not be 1D arrays or arrays of variable lengths # hence the use of the object dtype. assert_equal(distances.shape, (n_queries,)) assert_equal(distances.dtype, object) assert_equal(neighbors.shape, (n_queries,)) assert_equal(neighbors.dtype, object) # Compare with exact neighbor search query = X[rng.randint(0, n_samples)].reshape(1, -1) mean_dist = np.mean(pairwise_distances(query, X, metric='cosine')) nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist) distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist) # Radius-based queries do not sort the result points and the order # depends on the method, the random_state and the dataset order. Therefore # we need to sort the results ourselves before performing any comparison. sorted_dists_exact = np.sort(distances_exact[0]) sorted_dists_approx = np.sort(distances_approx[0]) # Distances to exact neighbors are less than or equal to approximate # counterparts as the approximate radius query might have missed some # closer neighbors. assert_true(np.all(np.less_equal(sorted_dists_exact, sorted_dists_approx))) @ignore_warnings def test_radius_neighbors_boundary_handling(): X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]] n_points = len(X) # Build an exact nearest neighbors model as reference model to ensure # consistency between exact and approximate methods nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X) # Build a LSHForest model with hyperparameter values that always guarantee # exact results on this toy dataset. 
lsfh = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0, n_candidates=n_points, random_state=42).fit(X) # define a query aligned with the first axis query = [[1., 0.]] # Compute the exact cosine distances of the query to the four points of # the dataset dists = pairwise_distances(query, X, metric='cosine').ravel() # The first point is almost aligned with the query (very small angle), # the cosine distance should therefore be almost null: assert_almost_equal(dists[0], 0, decimal=5) # The second point form an angle of 45 degrees to the query vector assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4)) # The third point is orthogonal from the query vector hence at a distance # exactly one: assert_almost_equal(dists[2], 1) # The last point is almost colinear but with opposite sign to the query # therefore it has a cosine 'distance' very close to the maximum possible # value of 2. assert_almost_equal(dists[3], 2, decimal=5) # If we query with a radius of one, all the samples except the last sample # should be included in the results. This means that the third sample # is lying on the boundary of the radius query: exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1) assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2]) assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1]) # If we perform the same query with a slightly lower radius, the third # point of the dataset that lay on the boundary of the previous query # is now rejected: eps = np.finfo(np.float64).eps exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps) approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps) assert_array_equal(np.sort(exact_idx[0]), [0, 1]) assert_array_equal(np.sort(approx_idx[0]), [0, 1]) assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2]) assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2]) def test_distances(): # Checks whether returned neighbors are from closest to farthest. n_samples = 12 n_features = 2 n_iter = 10 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() ignore_warnings(lshf.fit)(X) for i in range(n_iter): n_neighbors = rng.randint(0, n_samples) query = X[rng.randint(0, n_samples)].reshape(1, -1) distances, neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors, return_distance=True) # Returned neighbors should be from closest to farthest, that is # increasing distance values. assert_true(np.all(np.diff(distances[0]) >= 0)) # Note: the radius_neighbors method does not guarantee the order of # the results. def test_fit(): # Checks whether `fit` method sets all attribute values correctly. 
n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_estimators=n_estimators) ignore_warnings(lshf.fit)(X) # _input_array = X assert_array_equal(X, lshf._fit_X) # A hash function g(p) for each tree assert_equal(n_estimators, len(lshf.hash_functions_)) # Hash length = 32 assert_equal(32, lshf.hash_functions_[0].components_.shape[0]) # Number of trees_ in the forest assert_equal(n_estimators, len(lshf.trees_)) # Each tree has entries for every data point assert_equal(n_samples, len(lshf.trees_[0])) # Original indices after sorting the hashes assert_equal(n_estimators, len(lshf.original_indices_)) # Each set of original indices in a tree has entries for every data point assert_equal(n_samples, len(lshf.original_indices_[0])) def test_partial_fit(): # Checks whether inserting array is consistent with fitted data. # `partial_fit` method should set all attribute values correctly. n_samples = 12 n_samples_partial_fit = 3 n_features = 2 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) X_partial_fit = rng.rand(n_samples_partial_fit, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)() # Test unfitted estimator ignore_warnings(lshf.partial_fit)(X) assert_array_equal(X, lshf._fit_X) ignore_warnings(lshf.fit)(X) # Insert wrong dimension assert_raises(ValueError, lshf.partial_fit, np.random.randn(n_samples_partial_fit, n_features - 1)) ignore_warnings(lshf.partial_fit)(X_partial_fit) # size of _input_array = samples + 1 after insertion assert_equal(lshf._fit_X.shape[0], n_samples + n_samples_partial_fit) # size of original_indices_[1] = samples + 1 assert_equal(len(lshf.original_indices_[0]), n_samples + n_samples_partial_fit) # size of trees_[1] = samples + 1 assert_equal(len(lshf.trees_[1]), n_samples + n_samples_partial_fit) def test_hash_functions(): # Checks randomness of hash functions. # Variance and mean of each hash function (projection vector) # should be different from flattened array of hash functions. # If hash functions are not randomly built (seeded with # same value), variances and means of all functions are equal. n_samples = 12 n_features = 2 n_estimators = 5 rng = np.random.RandomState(42) X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( n_estimators=n_estimators, random_state=rng.randint(0, np.iinfo(np.int32).max)) ignore_warnings(lshf.fit)(X) hash_functions = [] for i in range(n_estimators): hash_functions.append(lshf.hash_functions_[i].components_) for i in range(n_estimators): assert_not_equal(np.var(hash_functions), np.var(lshf.hash_functions_[i].components_)) for i in range(n_estimators): assert_not_equal(np.mean(hash_functions), np.mean(lshf.hash_functions_[i].components_)) def test_candidates(): # Checks whether candidates are sufficient. # This should handle the cases when number of candidates is 0. # User should be warned when number of candidates is less than # requested number of neighbors. X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]], dtype=np.float32) X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1) # For zero candidates lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=32) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. 
Candidates are filled up" " uniformly from unselected" " indices." % (3, 32)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=3) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3) assert_equal(distances.shape[1], 3) # For candidates less than n_neighbors lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=31) ignore_warnings(lshf.fit)(X_train) message = ("Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (5, 31)) assert_warns_message(UserWarning, message, lshf.kneighbors, X_test, n_neighbors=5) distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5) assert_equal(distances.shape[1], 5) def test_graphs(): # Smoke tests for graph methods. n_samples_sizes = [5, 10, 20] n_features = 3 rng = np.random.RandomState(42) for n_samples in n_samples_sizes: X = rng.rand(n_samples, n_features) lshf = ignore_warnings(LSHForest, category=DeprecationWarning)( min_hash_match=0) ignore_warnings(lshf.fit)(X) kneighbors_graph = lshf.kneighbors_graph(X) radius_neighbors_graph = lshf.radius_neighbors_graph(X) assert_equal(kneighbors_graph.shape[0], n_samples) assert_equal(kneighbors_graph.shape[1], n_samples) assert_equal(radius_neighbors_graph.shape[0], n_samples) assert_equal(radius_neighbors_graph.shape[1], n_samples) def test_sparse_input(): # note: Fixed random state in sp.rand is not supported in older scipy. # The test should succeed regardless. X1 = sp.rand(50, 100) X2 = sp.rand(10, 100) forest_sparse = ignore_warnings(LSHForest, category=DeprecationWarning)( radius=1, random_state=0).fit(X1) forest_dense = ignore_warnings(LSHForest, category=DeprecationWarning)( radius=1, random_state=0).fit(X1.A) d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True) assert_almost_equal(d_sparse, d_dense) assert_almost_equal(i_sparse, i_dense) d_sparse, i_sparse = forest_sparse.radius_neighbors(X2, return_distance=True) d_dense, i_dense = forest_dense.radius_neighbors(X2.A, return_distance=True) assert_equal(d_sparse.shape, d_dense.shape) for a, b in zip(d_sparse, d_dense): assert_almost_equal(a, b) for a, b in zip(i_sparse, i_dense): assert_almost_equal(a, b)
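The accuracy checks in the tests above all reduce to one recall computation: intersect the approximate neighbor indices with the exact cosine ranking. A standalone sketch of that computation (data is synthetic; `approx` stands in for the output of `lshf.kneighbors`):

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

rng = np.random.RandomState(42)
X = rng.rand(100, 10)
query = X[rng.randint(0, 100)].reshape(1, -1)
n_points = 5

# Exact top-n_points neighbors under cosine distance.
exact = np.argsort(pairwise_distances(query, X, metric='cosine'))[0, :n_points]
# In the tests, approx comes from lshf.kneighbors(query, n_neighbors=n_points);
# here it is copied from the exact ranking just to exercise the ratio itself.
approx = exact.copy()
recall = np.intersect1d(exact, approx).shape[0] / float(n_points)
print(recall)  # 1.0 by construction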
bsd-3-clause
parrt/lolviz
prince_dtree.py
1
12296
import IPython, graphviz, re from io import StringIO from IPython.display import Image import numpy as np import pandas as pd import math from sklearn import tree from sklearn.datasets import load_boston, load_iris from collections import defaultdict import string import re YELLOW = "#fefecd" # "#fbfbd0" # "#FBFEB0" BLUE = "#D9E6F5" GREEN = "#cfe2d4" color_blind_friendly_colors = { 'redorange': '#f46d43', 'orange': '#fdae61', 'yellow': '#fee090', 'sky': '#e0f3f8', 'babyblue': '#abd9e9', 'lightblue': '#74add1', 'blue': '#4575b4' } color_blind_friendly_colors = [ None, # 0 classes None, # 1 class [YELLOW,BLUE], # 2 classes [YELLOW,BLUE,GREEN], # 3 classes [YELLOW,BLUE,GREEN,'#a1dab4'], # 4 [YELLOW,BLUE,GREEN,'#a1dab4','#41b6c4'], # 5 [YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#2c7fb8','#253494'], # 6 [YELLOW,'#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 7 [YELLOW,'#edf8b1','#c7e9b4','#7fcdbb','#41b6c4','#1d91c0','#225ea8','#0c2c84'], # 8 [YELLOW,'#ece7f2','#d0d1e6','#a6bddb','#74a9cf','#3690c0','#0570b0','#045a8d','#023858'], # 9 [YELLOW,'#e0f3f8','#313695','#fee090','#4575b4','#fdae61','#abd9e9','#74add1','#d73027','#f46d43'] # 10 ] for x in color_blind_friendly_colors[2:]: print(x) max_class_colors = len(color_blind_friendly_colors)-1 def tree_traverse(n_nodes, children_left, children_right): """ Derives code from http://scikit-learn.org/stable/auto_examples/tree/plot_unveil_tree_structure.html to walk tree Traversing tree structure to compute compute various properties such as the depth of each node and whether or not it is a leaf. Input - n_nodes: number of nodes in the tree children_left: array of length n_nodes. left children node indexes children_right: array of length n_nodes. right children node indexes :return: is_leaf: array of length n_nodes with boolean whether node i is leaf or not, node_depth: depth of each node from root to node. root is depth 0 """ node_depth = np.zeros(shape=n_nodes, dtype=np.int64) is_leaf = np.zeros(shape=n_nodes, dtype=bool) stack = [(0, -1)] # seed is the root node id and its parent depth while len(stack) > 0: node_id, parent_depth = stack.pop() # (0,-1) node_depth[node_id] = parent_depth + 1 # If we have a non-leaf node if children_left[node_id] != children_right[node_id]: stack.append((children_left[node_id], parent_depth + 1)) stack.append((children_right[node_id], parent_depth + 1)) else: is_leaf[node_id] = True return is_leaf, node_depth # def dectree_max_depth(tree): # n_nodes = tree.node_count # children_left = tree.children_left # children_right = tree.children_right # # def walk(node_id): # if (children_left[node_id] != children_right[node_id]): # left_max = 1 + walk(children_left[node_id]) # right_max = 1 + walk(children_right[node_id]) # # if node_id<100: print(f"node {node_id}: {left_max}, {right_max}") # return max(left_max, right_max) # else: # leaf # return 1 # # root_node_id = 0 # return walk(root_node_id) def dtreeviz(tree, X, y, precision=1, classnames=None, orientation="LR"): def get_feature(i): name = X.columns[feature[i]] node_name = ''.join(c for c in name if c not in string.punctuation)+str(i) node_name = re.sub("["+string.punctuation+string.whitespace+"]", '_', node_name) return name, node_name def round(v,ndigits=precision): return format(v, '.' 
+ str(ndigits) + 'f') def dec_node_box(name, node_name, split): html = """<table BORDER="0" CELLPADDING="0" CELLBORDER="0" CELLSPACING="0"> <tr> <td colspan="3" align="center" cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="b"><font face="Helvetica" color="#444443" point-size="12">{name}</font></td> </tr> <tr> <td colspan="3" cellpadding="1" border="0" bgcolor="#fefecd"></td> </tr> <tr> <td cellspacing="0" cellpadding="0" bgcolor="#fefecd" border="1" sides="r" align="right"><font face="Helvetica" color="#444443" point-size="11">split</font></td> <td cellspacing="0" cellpadding="0" border="0"></td> <td cellspacing="0" cellpadding="0" bgcolor="#fefecd" align="left"><font face="Helvetica" color="#444443" point-size="11">{split}</font></td> </tr> </table>""".format(name=name, split=split) return '{node_name} [shape=box label=<{label}>]\n'.format(label=html, node_name=node_name) def dec_node(name, node_name, split): html = """<font face="Helvetica" color="#444443" point-size="12">{name}<br/>@{split}</font>""".format(name=name, split=split) return '{node_name} [shape=none label=<{label}>]\n'.format(label=html, node_name=node_name) def prop_size(n): # map to 0.03 to .35 margin_range = (0.03, 0.35) if sample_count_range>0: zero_to_one = (n - min_samples) / sample_count_range return zero_to_one * (margin_range[1] - margin_range[0]) + margin_range[0] else: return margin_range[0] # parsing the tree structure n_nodes = tree.node_count # total nodes in the tree children_left = tree.children_left # left children node index children_right = tree.children_right # right children node index feature = tree.feature # feature index at splits (-2 means leaf) threshold = tree.threshold # split threshold values at given feature is_leaf, node_depth = tree_traverse(n_nodes, children_left, children_right) ranksep = ".22" if orientation=="TD": ranksep = ".35" st = '\ndigraph G {splines=line;\n \ nodesep=0.1;\n \ ranksep=%s;\n \ rankdir=%s;\n \ node [margin="0.03" penwidth="0.5" width=.1, height=.1];\n \ edge [arrowsize=.4 penwidth="0.5"]\n' % (ranksep,orientation) # Define decision nodes (non leaf nodes) as feature names for i in range(n_nodes): if not is_leaf[i]: # non leaf nodes name, node_name = get_feature(i) # st += dec_node_box(name, node_name, split=round(threshold[i])) st += dec_node(name, node_name, split=round(threshold[i])) # non leaf edges with > and <= for i in range(n_nodes): if not is_leaf[i]: name, node_name = get_feature(i) left, left_node_name = get_feature(children_left[i]) if is_leaf[children_left[i]]: left = left_node_name ='leaf%d' % children_left[i] right_name, right_node_name = get_feature(children_right[i]) if is_leaf[children_right[i]]: right = right_node_name ='leaf%d' % children_right[i] split = round(threshold[i]) left_html = '<font face="Helvetica" color="#444443" point-size="11">&lt;</font>' right_html = '<font face="Helvetica" color="#444443" point-size="11">&ge;</font>' if orientation=="TD": ldistance = ".9" rdistance = ".9" langle = "-28" rangle = "28" else: ldistance = "1.3" # not used in LR mode; just label not taillable. 
rdistance = "1.3" langle = "-90" rangle = "90" blankedge = 'label=<<font face="Helvetica" color="#444443" point-size="1">&nbsp;</font>>' st += '{name} -> {left} [{blankedge} labelangle="{angle}" labeldistance="{ldistance}" {tail}label=<{label}>]\n'\ .format(label="",#left_html, angle=langle, ldistance=ldistance, name=node_name, blankedge = "",#blankedge, tail="tail",#""tail" if orientation=="TD" else "", left=left_node_name) st += '{name} -> {right} [{blankedge} labelangle="{angle}" labeldistance="{rdistance}" {tail}label=<{label}>]\n' \ .format(label="",#right_html, angle=rangle, rdistance=rdistance, name=node_name, blankedge="",#blankedge, tail="tail",# "tail" if orientation == "TD" else "", right=right_node_name) # find range of leaf sample count leaf_sample_counts = [tree.n_node_samples[i] for i in range(n_nodes) if is_leaf[i]] min_samples = min(leaf_sample_counts) max_samples = max(leaf_sample_counts) sample_count_range = max_samples - min_samples print(leaf_sample_counts) print("range is ", sample_count_range) # is_classifier = hasattr(tree, 'n_classes') is_classifier = tree.n_classes > 1 color_values = list(reversed(color_blind_friendly_colors)) n_classes = tree.n_classes[0] color_values = color_blind_friendly_colors[n_classes] # color_values = [c+"EF" for c in color_values] # add alpha # Define leaf nodes (after edges so >= edges shown properly) for i in range(n_nodes): if is_leaf[i]: node_samples = tree.n_node_samples[i] impurity = tree.impurity if is_classifier: counts = np.array(tree.value[i][0]) predicted_class = np.argmax(counts) predicted = predicted_class if classnames: predicted = classnames[predicted_class] ratios = counts / node_samples # convert counts to ratios totalling 1.0 ratios = [round(r,3) for r in ratios] color_spec = ["{c};{r}".format(c=color_values[i],r=r) for i,r in enumerate(ratios)] color_spec = ':'.join(color_spec) if n_classes > max_class_colors: color_spec = YELLOW html = """<font face="Helvetica" color="black" point-size="12">{predicted}<br/>&nbsp;</font>""".format(predicted=predicted) margin = prop_size(node_samples) st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style={style} fillcolor="{colors}" shape=circle label=<{label}>]\n' \ .format(i=i, label=html, name=node_name, colors=color_spec, margin=margin, style='wedged' if n_classes<=max_class_colors else 'filled') else: value = tree.value[i][0] html = """<font face="Helvetica" color="#444443" point-size="11">"""+round(value[0])+"""</font>""" margin = prop_size(node_samples) st += 'leaf{i} [height=0 width="0.4" margin="{margin}" style=filled fillcolor="{color}" shape=circle label=<{label}>]\n'\ .format(i=i, label=html, name=node_name, color=YELLOW, margin=margin) # end of string st = st+'}' return st def boston(): regr = tree.DecisionTreeRegressor(max_depth=4, random_state=666) boston = load_boston() print(boston.data.shape, boston.target.shape) data = pd.DataFrame(boston.data) data.columns =boston.feature_names regr = regr.fit(data, boston.target) # st = dectreeviz(regr.tree_, data, boston.target) st = dtreeviz(regr.tree_, data, boston.target, orientation="TD") with open("/tmp/t3.dot", "w") as f: f.write(st) return st def iris(): clf = tree.DecisionTreeClassifier(max_depth=4, random_state=666) iris = load_iris() print(iris.data.shape, iris.target.shape) data = pd.DataFrame(iris.data) data.columns = iris.feature_names clf = clf.fit(data, iris.target) # st = dectreeviz(clf.tree_, data, boston.target) st = dtreeviz(clf.tree_, data, iris.target, orientation="TD" , classnames=["setosa", 
"versicolor", "virginica"] ) with open("/tmp/t3.dot", "w") as f: f.write(st) print(clf.tree_.value) return st # st = iris() st = boston() print(st) graphviz.Source(st).view()
bsd-3-clause
trankmichael/scikit-learn
sklearn/neighbors/approximate.py
128
22351
"""Approximate nearest neighbor search""" # Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk> # Joel Nothman <joel.nothman@gmail.com> import numpy as np import warnings from scipy import sparse from .base import KNeighborsMixin, RadiusNeighborsMixin from ..base import BaseEstimator from ..utils.validation import check_array from ..utils import check_random_state from ..metrics.pairwise import pairwise_distances from ..random_projection import GaussianRandomProjection __all__ = ["LSHForest"] HASH_DTYPE = '>u4' MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8 def _find_matching_indices(tree, bin_X, left_mask, right_mask): """Finds indices in sorted array of integers. Most significant h bits in the binary representations of the integers are matched with the items' most significant h bits. """ left_index = np.searchsorted(tree, bin_X & left_mask) right_index = np.searchsorted(tree, bin_X | right_mask, side='right') return left_index, right_index def _find_longest_prefix_match(tree, bin_X, hash_size, left_masks, right_masks): """Find the longest prefix match in tree for each query in bin_X Most significant bits are considered as the prefix. """ hi = np.empty_like(bin_X, dtype=np.intp) hi.fill(hash_size) lo = np.zeros_like(bin_X, dtype=np.intp) res = np.empty_like(bin_X, dtype=np.intp) left_idx, right_idx = _find_matching_indices(tree, bin_X, left_masks[hi], right_masks[hi]) found = right_idx > left_idx res[found] = lo[found] = hash_size r = np.arange(bin_X.shape[0]) kept = r[lo < hi] # indices remaining in bin_X mask while kept.shape[0]: mid = (lo.take(kept) + hi.take(kept)) // 2 left_idx, right_idx = _find_matching_indices(tree, bin_X.take(kept), left_masks[mid], right_masks[mid]) found = right_idx > left_idx mid_found = mid[found] lo[kept[found]] = mid_found + 1 res[kept[found]] = mid_found hi[kept[~found]] = mid[~found] kept = r[lo < hi] return res class ProjectionToHashMixin(object): """Turn a transformed real-valued array into a hash""" @staticmethod def _to_hash(projected): if projected.shape[1] % 8 != 0: raise ValueError('Require reduced dimensionality to be a multiple ' 'of 8 for hashing') # XXX: perhaps non-copying operation better out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE) return out.reshape(projected.shape[0], -1) def fit_transform(self, X, y=None): self.fit(X) return self.transform(X) def transform(self, X, y=None): return self._to_hash(super(ProjectionToHashMixin, self).transform(X)) class GaussianRandomProjectionHash(ProjectionToHashMixin, GaussianRandomProjection): """Use GaussianRandomProjection to produce a cosine LSH fingerprint""" def __init__(self, n_components=8, random_state=None): super(GaussianRandomProjectionHash, self).__init__( n_components=n_components, random_state=random_state) def _array_of_arrays(list_of_arrays): """Creates an array of array from list of arrays.""" out = np.empty(len(list_of_arrays), dtype=object) out[:] = list_of_arrays return out class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin): """Performs approximate nearest neighbor search using LSH forest. LSH Forest: Locality Sensitive Hashing forest [1] is an alternative method for vanilla approximate nearest neighbor search methods. LSH forest data structure has been implemented using sorted arrays and binary search and 32 bit fixed-length hashes. Random projection is used as the hash family which approximates cosine distance. 
The cosine distance is defined as ``1 - cosine_similarity``: the lowest value is 0 (identical point) but it is bounded above by 2 for the farthest points. Its value does not depend on the norm of the vector points but only on their relative angles. Read more in the :ref:`User Guide <approximate_nearest_neighbors>`. Parameters ---------- n_estimators : int (default = 10) Number of trees in the LSH Forest. min_hash_match : int (default = 4) Lowest hash length to be searched when candidate selection is performed for nearest neighbors. n_candidates : int (default = 50) Minimum number of candidates evaluated per estimator, assuming enough items meet the `min_hash_match` constraint. n_neighbors : int (default = 5) Number of neighbors to be returned from the query function when it is not provided to the :meth:`kneighbors` method. radius : float, optional (default = 1.0) Radius from the data point to its neighbors. This is the parameter space to use by default for the :meth:`radius_neighbors` queries. radius_cutoff_ratio : float, optional (default = 0.9) A value ranging from 0 to 1. Radius neighbors will be searched until the ratio between total neighbors within the radius and the total candidates becomes less than this value, unless the search is terminated by the hash length reaching `min_hash_match`. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Attributes ---------- hash_functions_ : list of GaussianRandomProjectionHash objects Hash function g(p,x) for a tree is an array of 32 randomly generated float arrays with the same dimension as the data set. This array is stored in the GaussianRandomProjectionHash object and can be obtained from the ``components_`` attribute. trees_ : array, shape (n_estimators, n_samples) Each tree (corresponding to a hash function) contains an array of sorted hashed values. The array representation may change in future versions. original_indices_ : array, shape (n_estimators, n_samples) Original indices of sorted hashed values in the fitted index. References ---------- .. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning Indexes for Similarity Search", WWW '05 Proceedings of the 14th international conference on World Wide Web, 651-660, 2005. Examples -------- >>> from sklearn.neighbors import LSHForest >>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]] >>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]] >>> lshf = LSHForest() >>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10, n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9, random_state=None) >>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2) >>> distances # doctest: +ELLIPSIS array([[ 0.069..., 0.149...], [ 0.229..., 0.481...], [ 0.004..., 0.014...]]) >>> indices array([[1, 2], [2, 0], [4, 0]]) """ def __init__(self, n_estimators=10, radius=1.0, n_candidates=50, n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9, random_state=None): self.n_estimators = n_estimators self.radius = radius self.random_state = random_state self.n_candidates = n_candidates self.n_neighbors = n_neighbors self.min_hash_match = min_hash_match self.radius_cutoff_ratio = radius_cutoff_ratio def _compute_distances(self, query, candidates): """Computes the cosine distance.
Distance is from the query to points in the candidates array. Returns argsort of distances in the candidates array and sorted distances. """ if candidates.shape == (0,): # needed since _fit_X[np.array([])] doesn't work if _fit_X sparse return np.empty(0, dtype=np.int), np.empty(0, dtype=float) if sparse.issparse(self._fit_X): candidate_X = self._fit_X[candidates] else: candidate_X = self._fit_X.take(candidates, axis=0, mode='clip') distances = pairwise_distances(query, candidate_X, metric='cosine')[0] distance_positions = np.argsort(distances) distances = distances.take(distance_positions, mode='clip', axis=0) return distance_positions, distances def _generate_masks(self): """Creates left and right masks for all hash lengths.""" tri_size = MAX_HASH_SIZE + 1 # Called once on fitting, output is independent of hashes left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:] right_mask = left_mask[::-1, ::-1] self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE) self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE) def _get_candidates(self, query, max_depth, bin_queries, n_neighbors): """Performs the Synchronous ascending phase. Returns an array of candidates, their distance ranks and distances. """ index_size = self._fit_X.shape[0] # Number of candidates considered including duplicates # XXX: not sure whether this is being calculated correctly wrt # duplicates from different iterations through a single tree n_candidates = 0 candidate_set = set() min_candidates = self.n_candidates * self.n_estimators while (max_depth > self.min_hash_match and (n_candidates < min_candidates or len(candidate_set) < n_neighbors)): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) n_candidates += stop - start candidate_set.update( self.original_indices_[i][start:stop].tolist()) max_depth -= 1 candidates = np.fromiter(candidate_set, count=len(candidate_set), dtype=np.intp) # For insufficient candidates, candidates are filled. # Candidates are filled from unselected indices uniformly. if candidates.shape[0] < n_neighbors: warnings.warn( "Number of candidates is not sufficient to retrieve" " %i neighbors with" " min_hash_match = %i. Candidates are filled up" " uniformly from unselected" " indices." % (n_neighbors, self.min_hash_match)) remaining = np.setdiff1d(np.arange(0, index_size), candidates) to_fill = n_neighbors - candidates.shape[0] candidates = np.concatenate((candidates, remaining[:to_fill])) ranks, distances = self._compute_distances(query, candidates.astype(int)) return (candidates[ranks[:n_neighbors]], distances[:n_neighbors]) def _get_radius_neighbors(self, query, max_depth, bin_queries, radius): """Finds radius neighbors from the candidates obtained. Their distances from query are smaller than radius. Returns radius neighbors and distances. 
""" ratio_within_radius = 1 threshold = 1 - self.radius_cutoff_ratio total_candidates = np.array([], dtype=int) total_neighbors = np.array([], dtype=int) total_distances = np.array([], dtype=float) while (max_depth > self.min_hash_match and ratio_within_radius > threshold): left_mask = self._left_mask[max_depth] right_mask = self._right_mask[max_depth] candidates = [] for i in range(self.n_estimators): start, stop = _find_matching_indices(self.trees_[i], bin_queries[i], left_mask, right_mask) candidates.extend( self.original_indices_[i][start:stop].tolist()) candidates = np.setdiff1d(candidates, total_candidates) total_candidates = np.append(total_candidates, candidates) ranks, distances = self._compute_distances(query, candidates) m = np.searchsorted(distances, radius, side='right') positions = np.searchsorted(total_distances, distances[:m]) total_neighbors = np.insert(total_neighbors, positions, candidates[ranks[:m]]) total_distances = np.insert(total_distances, positions, distances[:m]) ratio_within_radius = (total_neighbors.shape[0] / float(total_candidates.shape[0])) max_depth = max_depth - 1 return total_neighbors, total_distances def fit(self, X, y=None): """Fit the LSH forest on the data. This creates binary hashes of input data points by getting the dot product of input points and hash_function then transforming the projection into a binary string array based on the sign (positive/negative) of the projection. A sorted array of binary hashes is created. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. Returns ------- self : object Returns self. """ self._fit_X = check_array(X, accept_sparse='csr') # Creates a g(p,x) for each tree self.hash_functions_ = [] self.trees_ = [] self.original_indices_ = [] rng = check_random_state(self.random_state) int_max = np.iinfo(np.int32).max for i in range(self.n_estimators): # This is g(p,x) for a particular tree. # Builds a single tree. Hashing is done on an array of data points. # `GaussianRandomProjection` is used for hashing. # `n_components=hash size and n_features=n_dim. hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE, rng.randint(0, int_max)) hashes = hasher.fit_transform(self._fit_X)[:, 0] original_index = np.argsort(hashes) bin_hashes = hashes[original_index] self.original_indices_.append(original_index) self.trees_.append(bin_hashes) self.hash_functions_.append(hasher) self._generate_masks() return self def _query(self, X): """Performs descending phase to find maximum depth.""" # Calculate hashes of shape (n_samples, n_estimators, [hash_size]) bin_queries = np.asarray([hasher.transform(X)[:, 0] for hasher in self.hash_functions_]) bin_queries = np.rollaxis(bin_queries, 1) # descend phase depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE, self._left_mask, self._right_mask) for tree, tree_queries in zip(self.trees_, np.rollaxis(bin_queries, 1))] return bin_queries, np.max(depths, axis=0) def kneighbors(self, X, n_neighbors=None, return_distance=True): """Returns n_neighbors of approximate nearest neighbors. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. n_neighbors : int, opitonal (default = None) Number of neighbors required. If not provided, this will return the number specified at the initialization. 
return_distance : boolean, optional (default = True) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples, n_neighbors) Array representing the cosine distances to each point, only present if return_distance=True. ind : array, shape (n_samples, n_neighbors) Indices of the approximate nearest points in the population matrix. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if n_neighbors is None: n_neighbors = self.n_neighbors X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_candidates(X[i], max_depth[i], bin_queries[i], n_neighbors) neighbors.append(neighs) distances.append(dists) if return_distance: return np.array(distances), np.array(neighbors) else: return np.array(neighbors) def radius_neighbors(self, X, radius=None, return_distance=True): """Finds the neighbors within a given radius of a point or points. Return the indices and distances of some points from the dataset lying in a ball with size ``radius`` around the points of the query array. Points lying on the boundary are included in the results. The result points are *not* necessarily sorted by distance to their query point. LSH Forest being an approximate method, some true neighbors from the indexed dataset might be missing from the results. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single query. radius : float Limiting distance of neighbors to return. (default is the value passed to the constructor). return_distance : boolean, optional (default = True) Returns the distances of neighbors if set to True. Returns ------- dist : array, shape (n_samples,) of arrays Each element is an array representing the cosine distances to some points found within ``radius`` of the respective query. Only present if ``return_distance=True``. ind : array, shape (n_samples,) of arrays Each element is an array of indices for neighbors within ``radius`` of the respective query. """ if not hasattr(self, 'hash_functions_'): raise ValueError("estimator should be fitted.") if radius is None: radius = self.radius X = check_array(X, accept_sparse='csr') neighbors, distances = [], [] bin_queries, max_depth = self._query(X) for i in range(X.shape[0]): neighs, dists = self._get_radius_neighbors(X[i], max_depth[i], bin_queries[i], radius) neighbors.append(neighs) distances.append(dists) if return_distance: return _array_of_arrays(distances), _array_of_arrays(neighbors) else: return _array_of_arrays(neighbors) def partial_fit(self, X, y=None): """ Inserts new data into the already fitted LSH Forest. Cost is proportional to the new total size, so additions should be batched. Parameters ---------- X : array_like or sparse (CSR) matrix, shape (n_samples, n_features) New data points to be inserted into the LSH Forest. """ X = check_array(X, accept_sparse='csr') if not hasattr(self, 'hash_functions_'): return self.fit(X) if X.shape[1] != self._fit_X.shape[1]: raise ValueError("Number of features in X and" " fitted array does not match.") n_samples = X.shape[0] n_indexed = self._fit_X.shape[0] for i in range(self.n_estimators): bin_X = self.hash_functions_[i].transform(X)[:, 0] # gets the position to be added in the tree. positions = self.trees_[i].searchsorted(bin_X) # adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i], positions, bin_X) # add the entry into the original_indices_. self.original_indices_[i] = np.insert(self.original_indices_[i], positions, np.arange(n_indexed, n_indexed + n_samples)) # adds the entry into the input_array. if sparse.issparse(X) or sparse.issparse(self._fit_X): self._fit_X = sparse.vstack((self._fit_X, X)) else: self._fit_X = np.row_stack((self._fit_X, X)) return self
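The mask arrays built by _generate_masks() drive both query phases: left_mask[h] keeps the h most significant bits, right_mask[h] sets the trailing 32 - h bits, so every stored hash sharing an h-bit prefix with a query lies in one contiguous slice of the sorted tree. A standalone sketch of that binary-search trick (values are illustrative):

import numpy as np

h = 8  # prefix length to match
left_mask = np.uint32((0xFFFFFFFF << (32 - h)) & 0xFFFFFFFF)  # h leading ones
right_mask = np.uint32(0xFFFFFFFF >> h)                       # 32 - h trailing ones

rng = np.random.RandomState(0)
tree = np.sort(rng.randint(0, 2 ** 32, size=1000, dtype=np.uint32))
query = tree[123]  # query an already-indexed hash

# Same bounds as _find_matching_indices: the lower bound clears the suffix,
# the upper bound saturates it.
lo = np.searchsorted(tree, query & left_mask)
hi = np.searchsorted(tree, query | right_mask, side='right')
print('%d stored hashes share the first %d bits of the query' % (hi - lo, h))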
bsd-3-clause
sugartom/tensorflow-alien
tensorflow/examples/learn/text_classification.py
39
5106
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Example of Estimator for DNN-based text classification with DBpedia data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import sys import numpy as np import pandas from sklearn import metrics import tensorflow as tf from tensorflow.contrib.layers.python.layers import encoders learn = tf.contrib.learn FLAGS = None MAX_DOCUMENT_LENGTH = 10 EMBEDDING_SIZE = 50 n_words = 0 def bag_of_words_model(features, target): """A bag-of-words model. Note it disregards the word order in the text.""" target = tf.one_hot(target, 15, 1, 0) features = encoders.bow_encoder( features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE) logits = tf.contrib.layers.fully_connected(features, 15, activation_fn=None) loss = tf.contrib.losses.softmax_cross_entropy(logits, target) train_op = tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), optimizer='Adam', learning_rate=0.01) return ({ 'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits) }, loss, train_op) def rnn_model(features, target): """RNN model to predict from sequence of words to a class.""" # Convert indexes of words into embeddings. # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then # maps word indexes of the sequence into [batch_size, sequence_length, # EMBEDDING_SIZE]. word_vectors = tf.contrib.layers.embed_sequence( features, vocab_size=n_words, embed_dim=EMBEDDING_SIZE, scope='words') # Split into list of embedding per word, while removing doc length dim. # word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE]. word_list = tf.unstack(word_vectors, axis=1) # Create a Gated Recurrent Unit cell with hidden size of EMBEDDING_SIZE. cell = tf.contrib.rnn.GRUCell(EMBEDDING_SIZE) # Create an unrolled Recurrent Neural Networks to length of # MAX_DOCUMENT_LENGTH and passes word_list as inputs for each unit. _, encoding = tf.contrib.rnn.static_rnn(cell, word_list, dtype=tf.float32) # Given encoding of RNN, take encoding of last step (e.g hidden size of the # neural network of last step) and pass it as features for logistic # regression over output classes. target = tf.one_hot(target, 15, 1, 0) logits = tf.contrib.layers.fully_connected(encoding, 15, activation_fn=None) loss = tf.contrib.losses.softmax_cross_entropy(logits, target) # Create a training op. 
train_op = tf.contrib.layers.optimize_loss( loss, tf.contrib.framework.get_global_step(), optimizer='Adam', learning_rate=0.01) return ({ 'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits) }, loss, train_op) def main(unused_argv): global n_words # Prepare training and testing data dbpedia = learn.datasets.load_dataset( 'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data) x_train = pandas.DataFrame(dbpedia.train.data)[1] y_train = pandas.Series(dbpedia.train.target) x_test = pandas.DataFrame(dbpedia.test.data)[1] y_test = pandas.Series(dbpedia.test.target) # Process vocabulary vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH) x_transform_train = vocab_processor.fit_transform(x_train) x_transform_test = vocab_processor.transform(x_test) x_train = np.array(list(x_transform_train)) x_test = np.array(list(x_transform_test)) n_words = len(vocab_processor.vocabulary_) print('Total words: %d' % n_words) # Build model # Switch between rnn_model and bag_of_words_model to test different models. model_fn = rnn_model if FLAGS.bow_model: model_fn = bag_of_words_model classifier = learn.Estimator(model_fn=model_fn) # Train and predict classifier.fit(x_train, y_train, steps=100) y_predicted = [ p['class'] for p in classifier.predict( x_test, as_iterable=True) ] score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy: {0:f}'.format(score)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( '--test_with_fake_data', default=False, help='Test the example code with fake data.', action='store_true') parser.add_argument( '--bow_model', default=False, help='Run with BOW model instead of RNN.', action='store_true') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
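The vocabulary step in main() can be exercised on its own; a minimal sketch (assumes TensorFlow 1.x, where tf.contrib.learn is still available, and uses toy documents):

import numpy as np
import tensorflow as tf

# Map variable-length text to fixed-length id sequences, as main() does
# before handing the arrays to the Estimator.
docs = ['the quick brown fox', 'the lazy dog']
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
    max_document_length=10)
ids = np.array(list(vocab_processor.fit_transform(docs)))
print(ids.shape)                         # (2, 10): padded/truncated to 10 ids
print(len(vocab_processor.vocabulary_))  # vocabulary size, i.e. n_words above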
apache-2.0
aflaxman/scikit-learn
sklearn/metrics/regression.py
47
19967
"""Metrics to assess performance on regression task Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck # Joel Nothman <joel.nothman@gmail.com> # Karan Desai <karandesai281196@gmail.com> # Noel Dawe <noel@dawe.me> # Manoj Kumar <manojkumarsivaraj334@gmail.com> # Michael Eickenberg <michael.eickenberg@gmail.com> # Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu> # License: BSD 3 clause from __future__ import division import numpy as np from ..utils.validation import check_array, check_consistent_length from ..utils.validation import column_or_1d from ..externals.six import string_types __ALL__ = [ "mean_absolute_error", "mean_squared_error", "mean_squared_log_error", "median_absolute_error", "r2_score", "explained_variance_score" ] def _check_reg_targets(y_true, y_pred, multioutput): """Check that y_true and y_pred belong to the same regression task Parameters ---------- y_true : array-like, y_pred : array-like, multioutput : array-like or string in ['raw_values', uniform_average', 'variance_weighted'] or None None is accepted due to backward compatibility of r2_score(). Returns ------- type_true : one of {'continuous', continuous-multioutput'} The type of the true target data, as output by 'utils.multiclass.type_of_target' y_true : array-like of shape = (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples, n_outputs) Estimated target values. multioutput : array-like of shape = (n_outputs) or string in ['raw_values', uniform_average', 'variance_weighted'] or None Custom output weights if ``multioutput`` is array-like or just the corresponding argument if ``multioutput`` is a correct keyword. """ check_consistent_length(y_true, y_pred) y_true = check_array(y_true, ensure_2d=False) y_pred = check_array(y_pred, ensure_2d=False) if y_true.ndim == 1: y_true = y_true.reshape((-1, 1)) if y_pred.ndim == 1: y_pred = y_pred.reshape((-1, 1)) if y_true.shape[1] != y_pred.shape[1]: raise ValueError("y_true and y_pred have different number of output " "({0}!={1})".format(y_true.shape[1], y_pred.shape[1])) n_outputs = y_true.shape[1] allowed_multioutput_str = ('raw_values', 'uniform_average', 'variance_weighted') if isinstance(multioutput, string_types): if multioutput not in allowed_multioutput_str: raise ValueError("Allowed 'multioutput' string values are {}. " "You provided multioutput={!r}".format( allowed_multioutput_str, multioutput)) elif multioutput is not None: multioutput = check_array(multioutput, ensure_2d=False) if n_outputs == 1: raise ValueError("Custom weights are useful only in " "multi-output cases.") elif n_outputs != len(multioutput): raise ValueError(("There must be equally many custom weights " "(%d) as outputs (%d).") % (len(multioutput), n_outputs)) y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput' return y_type, y_true, y_pred, multioutput def mean_absolute_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Mean absolute error regression loss Read more in the :ref:`User Guide <mean_absolute_error>`. 
Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats If multioutput is 'raw_values', then mean absolute error is returned for each output separately. If multioutput is 'uniform_average' or an ndarray of weights, then the weighted average of all output errors is returned. MAE output is non-negative floating point. The best value is 0.0. Examples -------- >>> from sklearn.metrics import mean_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_absolute_error(y_true, y_pred) 0.5 >>> y_true = [[0.5, 1], [-1, 1], [7, -6]] >>> y_pred = [[0, 2], [-1, 2], [8, -5]] >>> mean_absolute_error(y_true, y_pred) 0.75 >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values') array([ 0.5, 1. ]) >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7]) ... # doctest: +ELLIPSIS 0.849... """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) output_errors = np.average(np.abs(y_pred - y_true), weights=sample_weight, axis=0) if isinstance(multioutput, string_types): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput) def mean_squared_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Mean squared error regression loss Read more in the :ref:`User Guide <mean_squared_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average'] or array-like of shape (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors in case of multioutput input. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> mean_squared_error(y_true, y_pred) 0.375 >>> y_true = [[0.5, 1],[-1, 1],[7, -6]] >>> y_pred = [[0, 2],[-1, 2],[8, -5]] >>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS 0.708... >>> mean_squared_error(y_true, y_pred, multioutput='raw_values') ... # doctest: +ELLIPSIS array([ 0.416..., 1. ]) >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7]) ... # doctest: +ELLIPSIS 0.824... 
""" y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) output_errors = np.average((y_true - y_pred) ** 2, axis=0, weights=sample_weight) if isinstance(multioutput, string_types): if multioutput == 'raw_values': return output_errors elif multioutput == 'uniform_average': # pass None as weights to np.average: uniform mean multioutput = None return np.average(output_errors, weights=multioutput) def mean_squared_log_error(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Mean squared logarithmic error regression loss Read more in the :ref:`User Guide <mean_squared_log_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) or (n_samples, n_outputs) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs) Estimated target values. sample_weight : array-like of shape = (n_samples), optional Sample weights. multioutput : string in ['raw_values', 'uniform_average'] \ or array-like of shape = (n_outputs) Defines aggregating of multiple output values. Array-like value defines weights used to average errors. 'raw_values' : Returns a full set of errors when the input is of multioutput format. 'uniform_average' : Errors of all outputs are averaged with uniform weight. Returns ------- loss : float or ndarray of floats A non-negative floating point value (the best value is 0.0), or an array of floating point values, one for each individual target. Examples -------- >>> from sklearn.metrics import mean_squared_log_error >>> y_true = [3, 5, 2.5, 7] >>> y_pred = [2.5, 5, 4, 8] >>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS 0.039... >>> y_true = [[0.5, 1], [1, 2], [7, 6]] >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]] >>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS 0.044... >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values') ... # doctest: +ELLIPSIS array([ 0.004..., 0.083...]) >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7]) ... # doctest: +ELLIPSIS 0.060... """ y_type, y_true, y_pred, multioutput = _check_reg_targets( y_true, y_pred, multioutput) if not (y_true >= 0).all() and not (y_pred >= 0).all(): raise ValueError("Mean Squared Logarithmic Error cannot be used when " "targets contain negative values.") return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1), sample_weight, multioutput) def median_absolute_error(y_true, y_pred): """Median absolute error regression loss Read more in the :ref:`User Guide <median_absolute_error>`. Parameters ---------- y_true : array-like of shape = (n_samples) Ground truth (correct) target values. y_pred : array-like of shape = (n_samples) Estimated target values. Returns ------- loss : float A positive floating point value (the best value is 0.0). Examples -------- >>> from sklearn.metrics import median_absolute_error >>> y_true = [3, -0.5, 2, 7] >>> y_pred = [2.5, 0.0, 2, 8] >>> median_absolute_error(y_true, y_pred) 0.5 """ y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred, 'uniform_average') if y_type == 'continuous-multioutput': raise ValueError("Multioutput not supported in median_absolute_error") return np.median(np.abs(y_pred - y_true)) def explained_variance_score(y_true, y_pred, sample_weight=None, multioutput='uniform_average'): """Explained variance regression score function Best possible score is 1.0, lower values are worse. Read more in the :ref:`User Guide <explained_variance_score>`. 
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average', \
                'variance_weighted'] or array-like of shape (n_outputs)

        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    ... # doctest: +ELLIPSIS
    0.983...

    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
    numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
                           weights=sample_weight, axis=0)

    y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_true_avg) ** 2,
                             weights=sample_weight, axis=0)

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator
    output_scores = np.ones(y_true.shape[1])

    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            # return scores individually
            return output_scores
        elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
    else:
        avg_weights = multioutput

    return np.average(output_scores, weights=avg_weights)


def r2_score(y_true, y_pred, sample_weight=None,
             multioutput="uniform_average"):
    """R^2 (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get an R^2 score of 0.0.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average', \
                'variance_weighted'] or None or array-like of shape (n_outputs)

        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".

        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.

    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred, multioutput='variance_weighted')
    ... # doctest: +ELLIPSIS
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.

    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    denominator = (weight * (y_true - np.average(
        y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
                                                          dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrarily set to zero to avoid -inf scores: having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            # return scores individually
            return output_scores
        elif multioutput == 'uniform_average':
            # passing None as weights to np.average results in a uniform mean
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
            # avoid failing on constant y or one-element arrays
            if not np.any(nonzero_denominator):
                if not np.any(nonzero_numerator):
                    return 1.0
                else:
                    return 0.0
    else:
        avg_weights = multioutput

    return np.average(output_scores, weights=avg_weights)
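
# --- Illustrative usage sketch (editor's addition, not part of the module) --
# A minimal demonstration of the metrics defined above, exercised through
# their public ``sklearn.metrics`` entry points; the inputs are made up for
# illustration only, and the block is guarded so it never runs on import.
if __name__ == "__main__":
    from sklearn.metrics import (mean_absolute_error, mean_squared_error,
                                 median_absolute_error, r2_score,
                                 explained_variance_score)

    y_true = [3, -0.5, 2, 7]
    y_pred = [2.5, 0.0, 2, 8]

    # Error metrics are non-negative (0.0 is best); both scores peak at 1.0.
    print("MAE:   ", mean_absolute_error(y_true, y_pred))       # 0.5
    print("MSE:   ", mean_squared_error(y_true, y_pred))        # 0.375
    print("MedAE: ", median_absolute_error(y_true, y_pred))     # 0.5
    print("R^2:   ", r2_score(y_true, y_pred))                  # ~0.948
    print("EVS:   ", explained_variance_score(y_true, y_pred))  # ~0.957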
bsd-3-clause
trungnt13/scikit-learn
examples/linear_model/plot_ols.py
220
1940
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.

The coefficients, the mean squared error and the variance score are also
calculated.

"""
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause


import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()


# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error (a mean, not a residual *sum* of squares)
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
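
# Editor's addition (not in the original example): the two statistics printed
# above can be obtained from ``sklearn.metrics`` instead of being computed by
# hand; note that ``regr.score`` is the R^2 coefficient of determination.
from sklearn.metrics import mean_squared_error, r2_score

diabetes_y_pred = regr.predict(diabetes_X_test)
print("Mean squared error (sklearn.metrics): %.2f"
      % mean_squared_error(diabetes_y_test, diabetes_y_pred))
print("R^2 (same value as regr.score): %.2f"
      % r2_score(diabetes_y_test, diabetes_y_pred))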
bsd-3-clause
anirudhjayaraman/scikit-learn
sklearn/utils/tests/test_extmath.py
70
16531
# Authors: Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Denis Engemann <d.engemann@fz-juelich.de> # # License: BSD 3 clause import numpy as np from scipy import sparse from scipy import linalg from scipy import stats from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.extmath import density from sklearn.utils.extmath import logsumexp from sklearn.utils.extmath import norm, squared_norm from sklearn.utils.extmath import randomized_svd from sklearn.utils.extmath import row_norms from sklearn.utils.extmath import weighted_mode from sklearn.utils.extmath import cartesian from sklearn.utils.extmath import log_logistic from sklearn.utils.extmath import fast_dot, _fast_dot from sklearn.utils.extmath import svd_flip from sklearn.utils.extmath import _batch_mean_variance_update from sklearn.utils.extmath import _deterministic_vector_sign_flip from sklearn.utils.extmath import softmax from sklearn.datasets.samples_generator import make_low_rank_matrix def test_density(): rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 5)) X[1, 2] = 0 X[5, 3] = 0 X_csr = sparse.csr_matrix(X) X_csc = sparse.csc_matrix(X) X_coo = sparse.coo_matrix(X) X_lil = sparse.lil_matrix(X) for X_ in (X_csr, X_csc, X_coo, X_lil): assert_equal(density(X_), density(X)) def test_uniform_weights(): # with uniform weights, results should be identical to stats.mode rng = np.random.RandomState(0) x = rng.randint(10, size=(10, 5)) weights = np.ones(x.shape) for axis in (None, 0, 1): mode, score = stats.mode(x, axis) mode2, score2 = weighted_mode(x, weights, axis) assert_true(np.all(mode == mode2)) assert_true(np.all(score == score2)) def test_random_weights(): # set this up so that each row should have a weighted mode of 6, # with a score that is easily reproduced mode_result = 6 rng = np.random.RandomState(0) x = rng.randint(mode_result, size=(100, 10)) w = rng.random_sample(x.shape) x[:, :5] = mode_result w[:, :5] += 1 mode, score = weighted_mode(x, w, axis=1) assert_array_equal(mode, mode_result) assert_array_almost_equal(score.ravel(), w[:, :5].sum(1)) def test_logsumexp(): # Try to add some smallish numbers in logspace x = np.array([1e-40] * 1000000) logx = np.log(x) assert_almost_equal(np.exp(logsumexp(logx)), x.sum()) X = np.vstack([x, x]) logX = np.vstack([logx, logx]) assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0)) assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1)) def test_randomized_svd_low_rank(): # Check that extmath.randomized_svd is consistent with linalg.svd n_samples = 100 n_features = 500 rank = 5 k = 10 # generate a matrix X of approximate effective rank `rank` and no noise # component (very structured signal): X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=rank, tail_strength=0.0, random_state=0) assert_equal(X.shape, (n_samples, n_features)) # compute the singular values of X using the slow exact method U, s, V = linalg.svd(X, full_matrices=False) # compute the singular values of X using the fast approximate method Ua, sa, Va = randomized_svd(X, k) assert_equal(Ua.shape, (n_samples, k)) assert_equal(sa.shape, (k,)) assert_equal(Va.shape, (k, n_features)) # ensure 
that the singular values of both methods are equal up to the real
    # rank of the matrix
    assert_almost_equal(s[:k], sa)

    # check the singular vectors too (while not checking the sign)
    assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))

    # check the sparse matrix representation
    X = sparse.csr_matrix(X)

    # compute the singular values of X using the fast approximate method
    Ua, sa, Va = randomized_svd(X, k)
    assert_almost_equal(s[:rank], sa[:rank])


def test_norm_squared_norm():
    X = np.random.RandomState(42).randn(50, 63)
    X *= 100        # check stability
    X += 200

    assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
    assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
    assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)


def test_row_norms():
    X = np.random.RandomState(42).randn(100, 100)
    sq_norm = (X ** 2).sum(axis=1)

    assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
    assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))

    Xcsr = sparse.csr_matrix(X, dtype=np.float32)
    assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
    assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))


def test_randomized_svd_low_rank_with_noise():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # generate a matrix X with approximate effective rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)

    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.05)

    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)

    # the iterated power method helps to get rid of the noise:
    assert_almost_equal(s[:k], sap, decimal=3)


def test_randomized_svd_infinite_rank():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10

    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))

    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)

    # compute the singular values of X using the fast approximate method
    # without the iterated power method
    _, sa, _ = randomized_svd(X, k, n_iter=0)

    # the approximation does not tolerate the noise:
    assert_greater(np.abs(s[:k] - sa).max(), 0.1)

    # compute the singular values of X using the fast approximate method with
    # iterated power method
    _, sap, _ = randomized_svd(X, k, n_iter=5)

    # the iterated power method is still managing to get most of the
    # structure at the requested rank
    assert_almost_equal(s[:k], sap, decimal=3)


def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10

    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
random_state=0) assert_equal(X.shape, (n_samples, n_features)) U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False, random_state=0) U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True, random_state=0) U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto', random_state=0) U4, s4, V4 = linalg.svd(X, full_matrices=False) assert_almost_equal(s1, s4[:k], decimal=3) assert_almost_equal(s2, s4[:k], decimal=3) assert_almost_equal(s3, s4[:k], decimal=3) assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]), decimal=2) assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]), decimal=2) # in this case 'auto' is equivalent to transpose assert_almost_equal(s2, s3) def test_svd_flip(): # Check that svd_flip works in both situations, and reconstructs input. rs = np.random.RandomState(1999) n_samples = 20 n_features = 10 X = rs.randn(n_samples, n_features) # Check matrix reconstruction U, S, V = linalg.svd(X, full_matrices=False) U1, V1 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6) # Check transposed matrix reconstruction XT = X.T U, S, V = linalg.svd(XT, full_matrices=False) U2, V2 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6) # Check that different flip methods are equivalent under reconstruction U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True) assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6) U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False) assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6) def test_randomized_svd_sign_flip(): a = np.array([[2.0, 0.0], [0.0, 1.0]]) u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41) for seed in range(10): u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed) assert_almost_equal(u1, u2) assert_almost_equal(v1, v2) assert_almost_equal(np.dot(u2 * s2, v2), a) assert_almost_equal(np.dot(u2.T, u2), np.eye(2)) assert_almost_equal(np.dot(v2.T, v2), np.eye(2)) def test_cartesian(): # Check if cartesian product delivers the right results axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7])) true_out = np.array([[1, 4, 6], [1, 4, 7], [1, 5, 6], [1, 5, 7], [2, 4, 6], [2, 4, 7], [2, 5, 6], [2, 5, 7], [3, 4, 6], [3, 4, 7], [3, 5, 6], [3, 5, 7]]) out = cartesian(axes) assert_array_equal(true_out, out) # check single axis x = np.arange(3) assert_array_equal(x[:, np.newaxis], cartesian((x,))) def test_logistic_sigmoid(): # Check correctness and robustness of logistic sigmoid implementation naive_logistic = lambda x: 1 / (1 + np.exp(-x)) naive_log_logistic = lambda x: np.log(naive_logistic(x)) x = np.linspace(-2, 2, 50) assert_array_almost_equal(log_logistic(x), naive_log_logistic(x)) extreme_x = np.array([-100., 100.]) assert_array_almost_equal(log_logistic(extreme_x), [-100, 0]) def test_fast_dot(): # Check fast dot blas wrapper function if fast_dot is np.dot: return rng = np.random.RandomState(42) A = rng.random_sample([2, 10]) B = rng.random_sample([2, 10]) try: linalg.get_blas_funcs(['gemm'])[0] has_blas = True except (AttributeError, ValueError): has_blas = False if has_blas: # Test _fast_dot for invalid input. # Maltyped data. for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]: assert_raises(ValueError, _fast_dot, A.astype(dt1), B.astype(dt2).T) # Malformed data. 
## ndim == 0 E = np.empty(0) assert_raises(ValueError, _fast_dot, E, E) ## ndim == 1 assert_raises(ValueError, _fast_dot, A, A[0]) ## ndim > 2 assert_raises(ValueError, _fast_dot, A.T, np.array([A, A])) ## min(shape) == 1 assert_raises(ValueError, _fast_dot, A, A[0, :][None, :]) # test for matrix mismatch error assert_raises(ValueError, _fast_dot, A, A) # Test cov-like use case + dtypes. for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) # col < row C = np.dot(A.T, A) C_ = fast_dot(A.T, A) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A, B.T) C_ = fast_dot(A, B.T) assert_almost_equal(C, C_, decimal=5) # Test square matrix * rectangular use case. A = rng.random_sample([2, 2]) for dtype in ['f8', 'f4']: A = A.astype(dtype) B = B.astype(dtype) C = np.dot(A, B) C_ = fast_dot(A, B) assert_almost_equal(C, C_, decimal=5) C = np.dot(A.T, B) C_ = fast_dot(A.T, B) assert_almost_equal(C, C_, decimal=5) if has_blas: for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]: assert_raises(ValueError, _fast_dot, x, x.T) def test_incremental_variance_update_formulas(): # Test Youngs and Cramer incremental variance formulas. # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html A = np.array([[600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300], [600, 470, 170, 430, 300]]).T idx = 2 X1 = A[:idx, :] X2 = A[idx:, :] old_means = X1.mean(axis=0) old_variances = X1.var(axis=0) old_sample_count = X1.shape[0] final_means, final_variances, final_count = _batch_mean_variance_update( X2, old_means, old_variances, old_sample_count) assert_almost_equal(final_means, A.mean(axis=0), 6) assert_almost_equal(final_variances, A.var(axis=0), 6) assert_almost_equal(final_count, A.shape[0]) def test_incremental_variance_ddof(): # Test that degrees of freedom parameter for calculations are correct. 
rng = np.random.RandomState(1999) X = rng.randn(50, 10) n_samples, n_features = X.shape for batch_size in [11, 20, 37]: steps = np.arange(0, X.shape[0], batch_size) if steps[-1] != X.shape[0]: steps = np.hstack([steps, n_samples]) for i, j in zip(steps[:-1], steps[1:]): batch = X[i:j, :] if i == 0: incremental_means = batch.mean(axis=0) incremental_variances = batch.var(axis=0) # Assign this twice so that the test logic is consistent incremental_count = batch.shape[0] sample_count = batch.shape[0] else: result = _batch_mean_variance_update( batch, incremental_means, incremental_variances, sample_count) (incremental_means, incremental_variances, incremental_count) = result sample_count += batch.shape[0] calculated_means = np.mean(X[:j], axis=0) calculated_variances = np.var(X[:j], axis=0) assert_almost_equal(incremental_means, calculated_means, 6) assert_almost_equal(incremental_variances, calculated_variances, 6) assert_equal(incremental_count, sample_count) def test_vector_sign_flip(): # Testing that sign flip is working & largest value has positive sign data = np.random.RandomState(36).randn(5, 5) max_abs_rows = np.argmax(np.abs(data), axis=1) data_flipped = _deterministic_vector_sign_flip(data) max_rows = np.argmax(data_flipped, axis=1) assert_array_equal(max_abs_rows, max_rows) signs = np.sign(data[range(data.shape[0]), max_abs_rows]) assert_array_equal(data, data_flipped * signs[:, np.newaxis]) def test_softmax(): rng = np.random.RandomState(0) X = rng.randn(3, 5) exp_X = np.exp(X) sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1)) assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
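
# Editor's sketch (not in the original suite): one extra check on softmax's
# numerical stability -- rows should still sum to one when the logits are
# large enough to overflow a naive exp-then-normalize implementation.
def test_softmax_stability():
    rng = np.random.RandomState(42)
    X = 1000. * rng.randn(4, 6)  # np.exp(1000) would overflow float64
    probs = softmax(X)
    assert_true(np.all(np.isfinite(probs)))
    assert_array_almost_equal(np.sum(probs, axis=1), np.ones(4))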
bsd-3-clause
SuperJohn/scikit-class
grid_search.py
6
1243
import pandas as pd import numpy as np df = pd.read_csv('tweets.csv') target = df['is_there_an_emotion_directed_at_a_brand_or_product'] text = df['tweet_text'] fixed_text = text[pd.notnull(text)] fixed_target = target[pd.notnull(text)] from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import chi2 p = Pipeline(steps=[('counts', CountVectorizer()), ('feature_selection', SelectKBest(chi2)), ('multinomialnb', MultinomialNB())]) from sklearn.grid_search import GridSearchCV parameters = { 'counts__max_df': (0.5, 0.75, 1.0), 'counts__min_df': (1, 2, 3), 'counts__ngram_range': ((1,1), (1,2)), # 'feature_selection__k': (1000, 10000, 100000) } grid_search = GridSearchCV(p, parameters, n_jobs=1, verbose=1, cv=10) grid_search.fit(fixed_text, fixed_target) print("Best score: %0.3f" % grid_search.best_score_) print("Best parameters set:") best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print("\t%s: %r" % (param_name, best_parameters[param_name]))
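
# Editor's addition (hypothetical usage, not in the original script): with
# refit enabled (the default), GridSearchCV re-fits the best pipeline on the
# full data, so the fitted object can be used directly on new, unseen text.
example_tweets = ["I love my new iPad!"]  # made-up input for illustration
print(grid_search.predict(example_tweets))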
gpl-2.0
meduz/scikit-learn
examples/linear_model/plot_ransac.py
73
1859
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example we see how to robustly fit a linear model to faulty data using the RANSAC algorithm. """ import numpy as np from matplotlib import pyplot as plt from sklearn import linear_model, datasets n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data model = linear_model.LinearRegression() model.fit(X, y) # Robustly fit linear model with RANSAC algorithm model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(X, y) inlier_mask = model_ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(-5, 5) line_y = model.predict(line_X[:, np.newaxis]) line_y_ransac = model_ransac.predict(line_X[:, np.newaxis]) # Compare estimated coefficients print("Estimated coefficients (true, normal, RANSAC):") print(coef, model.coef_, model_ransac.estimator_.coef_) lw = 2 plt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.', label='Inliers') plt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.', label='Outliers') plt.plot(line_X, line_y, color='navy', linestyle='-', linewidth=lw, label='Linear regressor') plt.plot(line_X, line_y_ransac, color='cornflowerblue', linestyle='-', linewidth=lw, label='RANSAC regressor') plt.legend(loc='lower right') plt.show()
bsd-3-clause
liyu1990/sklearn
examples/ensemble/plot_gradient_boosting_oob.py
50
4764
""" ====================================== Gradient Boosting Out-of-Bag estimates ====================================== Out-of-bag (OOB) estimates can be a useful heuristic to estimate the "optimal" number of boosting iterations. OOB estimates are almost identical to cross-validation estimates but they can be computed on-the-fly without the need for repeated model fitting. OOB estimates are only available for Stochastic Gradient Boosting (i.e. ``subsample < 1.0``), the estimates are derived from the improvement in loss based on the examples not included in the bootstrap sample (the so-called out-of-bag examples). The OOB estimator is a pessimistic estimator of the true test loss, but remains a fairly good approximation for a small number of trees. The figure shows the cumulative sum of the negative OOB improvements as a function of the boosting iteration. As you can see, it tracks the test loss for the first hundred iterations but then diverges in a pessimistic way. The figure also shows the performance of 3-fold cross validation which usually gives a better estimate of the test loss but is computationally more demanding. """ print(__doc__) # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn import ensemble from sklearn.model_selection import KFold from sklearn.model_selection import train_test_split # Generate data (adapted from G. Ridgeway's gbm example) n_samples = 1000 random_state = np.random.RandomState(13) x1 = random_state.uniform(size=n_samples) x2 = random_state.uniform(size=n_samples) x3 = random_state.randint(0, 4, size=n_samples) p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3))) y = random_state.binomial(1, p, size=n_samples) X = np.c_[x1, x2, x3] X = X.astype(np.float32) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=9) # Fit classifier with out-of-bag estimates params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5, 'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3} clf = ensemble.GradientBoostingClassifier(**params) clf.fit(X_train, y_train) acc = clf.score(X_test, y_test) print("Accuracy: {:.4f}".format(acc)) n_estimators = params['n_estimators'] x = np.arange(n_estimators) + 1 def heldout_score(clf, X_test, y_test): """compute deviance scores on ``X_test`` and ``y_test``. 
""" score = np.zeros((n_estimators,), dtype=np.float64) for i, y_pred in enumerate(clf.staged_decision_function(X_test)): score[i] = clf.loss_(y_test, y_pred) return score def cv_estimate(n_folds=3): cv = KFold(n_folds=n_folds) cv_clf = ensemble.GradientBoostingClassifier(**params) val_scores = np.zeros((n_estimators,), dtype=np.float64) for train, test in cv.split(X_train, y_train): cv_clf.fit(X_train[train], y_train[train]) val_scores += heldout_score(cv_clf, X_train[test], y_train[test]) val_scores /= n_folds return val_scores # Estimate best n_estimator using cross-validation cv_score = cv_estimate(3) # Compute best n_estimator for test data test_score = heldout_score(clf, X_test, y_test) # negative cumulative sum of oob improvements cumsum = -np.cumsum(clf.oob_improvement_) # min loss according to OOB oob_best_iter = x[np.argmin(cumsum)] # min loss according to test (normalize such that first loss is 0) test_score -= test_score[0] test_best_iter = x[np.argmin(test_score)] # min loss according to cv (normalize such that first loss is 0) cv_score -= cv_score[0] cv_best_iter = x[np.argmin(cv_score)] # color brew for the three curves oob_color = list(map(lambda x: x / 256.0, (190, 174, 212))) test_color = list(map(lambda x: x / 256.0, (127, 201, 127))) cv_color = list(map(lambda x: x / 256.0, (253, 192, 134))) # plot curves and vertical lines for best iterations plt.plot(x, cumsum, label='OOB loss', color=oob_color) plt.plot(x, test_score, label='Test loss', color=test_color) plt.plot(x, cv_score, label='CV loss', color=cv_color) plt.axvline(x=oob_best_iter, color=oob_color) plt.axvline(x=test_best_iter, color=test_color) plt.axvline(x=cv_best_iter, color=cv_color) # add three vertical lines to xticks xticks = plt.xticks() xticks_pos = np.array(xticks[0].tolist() + [oob_best_iter, cv_best_iter, test_best_iter]) xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) + ['OOB', 'CV', 'Test']) ind = np.argsort(xticks_pos) xticks_pos = xticks_pos[ind] xticks_label = xticks_label[ind] plt.xticks(xticks_pos, xticks_label) plt.legend(loc='upper right') plt.ylabel('normalized loss') plt.xlabel('number of iterations') plt.show()
bsd-3-clause
massmutual/scikit-learn
sklearn/utils/estimator_checks.py
1
54609
from __future__ import print_function import types import warnings import sys import traceback import pickle from copy import deepcopy import numpy as np from scipy import sparse import struct from sklearn.externals.six.moves import zip from sklearn.externals.joblib import hash, Memory from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raises_regex from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_in from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import META_ESTIMATORS from sklearn.utils.testing import set_random_state from sklearn.utils.testing import assert_greater from sklearn.utils.testing import SkipTest from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.base import (clone, ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin, BaseEstimator) from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.random_projection import BaseRandomProjection from sklearn.feature_selection import SelectKBest from sklearn.svm.base import BaseLibSVM, BaseSVC from sklearn.pipeline import make_pipeline from sklearn.decomposition import NMF, ProjectedGradientNMF from sklearn.utils.validation import DataConversionWarning from sklearn.utils import ConvergenceWarning from sklearn.cross_validation import train_test_split from sklearn.utils import shuffle from sklearn.utils.fixes import signature from sklearn.preprocessing import StandardScaler from sklearn.datasets import load_iris, load_boston, make_blobs BOSTON = None CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'] MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso', 'LassoLars', 'LinearRegression', 'MultiTaskElasticNet', 'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV', 'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression', 'RANSACRegressor', 'RadiusNeighborsRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV'] # Estimators with deprecated transform methods. Should be removed in 0.19 when # _LearntSelectorMixin is removed. 
DEPRECATED_TRANSFORM = [
    "RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
    "ExtraTreesRegressor", "RandomTreesEmbedding", "DecisionTreeClassifier",
    "DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
    "LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
    "LogisticRegression", "LogisticRegressionCV",
    "GradientBoostingClassifier", "GradientBoostingRegressor"]


def _yield_non_meta_checks(name, Estimator):
    yield check_estimators_dtypes
    yield check_fit_score_takes_y
    yield check_dtype_object
    yield check_estimators_fit_returns_self

    # Check that all estimators yield informative messages when
    # trained on empty datasets
    yield check_estimators_empty_data_messages

    if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
        # SpectralEmbedding is non-deterministic,
        # see issue #4236
        # cross-decomposition's "transform" returns X and Y
        yield check_pipeline_consistency

    if name not in ['Imputer']:
        # Test that all estimators check their input for NaN's and infs
        yield check_estimators_nan_inf

    if name not in ['GaussianProcess']:
        # FIXME!
        # in particular GaussianProcess!
        yield check_estimators_overwrite_params

    if hasattr(Estimator, 'sparsify'):
        yield check_sparsify_coefficients

    yield check_estimator_sparse_data

    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    yield check_estimators_pickle


def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
    yield check_classifier_data_not_an_array
    # test classifiers trained on a single label always return this label
    yield check_classifiers_one_label
    yield check_classifiers_classes
    yield check_estimators_partial_fit_n_features
    # basic consistency testing
    yield check_classifiers_train
    if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
            # TODO some complication with -1 label
            and name not in ["DecisionTreeClassifier",
                             "ExtraTreeClassifier"]):
        # We don't raise a warning in these classifiers, as
        # the column y interface is used by the forests.
        yield check_supervised_y_2d

    # test if NotFittedError is raised
    yield check_estimators_unfitted
    if 'class_weight' in Classifier().get_params().keys():
        yield check_class_weight_classifiers


def _yield_regressor_checks(name, Regressor):
    # TODO: test with intercept
    # TODO: test with multiple responses
    # basic testing
    yield check_regressors_train
    yield check_regressor_data_not_an_array
    yield check_estimators_partial_fit_n_features
    yield check_regressors_no_decision_function
    yield check_supervised_y_2d
    if name != 'CCA':
        # check that the regressor handles int input
        yield check_regressors_int
    # Test if NotFittedError is raised
    yield check_estimators_unfitted


def _yield_transformer_checks(name, Transformer):
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
                    'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
        yield check_transformer_data_not_an_array
    # these don't actually fit the data, so don't raise errors
    if name not in ['AdditiveChi2Sampler', 'Binarizer',
                    'FunctionTransformer', 'Normalizer']:
        # basic tests
        yield check_transformer_general
    yield check_transformers_unfitted


def _yield_clustering_checks(name, Clusterer):
    yield check_clusterer_compute_labels_predict
    if name not in ('WardAgglomeration', "FeatureAgglomeration"):
        # this is clustering on the features
        # let's not test that here.
        yield check_clustering
    yield check_estimators_partial_fit_n_features


def _yield_all_checks(name, Estimator):
    for check in _yield_non_meta_checks(name, Estimator):
        yield check
    if issubclass(Estimator, ClassifierMixin):
        for check in _yield_classifier_checks(name, Estimator):
            yield check
    if issubclass(Estimator, RegressorMixin):
        for check in _yield_regressor_checks(name, Estimator):
            yield check
    if issubclass(Estimator, TransformerMixin):
        if name not in DEPRECATED_TRANSFORM:
            for check in _yield_transformer_checks(name, Estimator):
                yield check
    if issubclass(Estimator, ClusterMixin):
        for check in _yield_clustering_checks(name, Estimator):
            yield check
    yield check_fit2d_predict1d
    yield check_fit2d_1sample
    yield check_fit2d_1feature
    yield check_fit1d_1feature
    yield check_fit1d_1sample


def check_estimator(Estimator):
    """Check if estimator adheres to sklearn conventions.

    This estimator will run an extensive test-suite for input validation,
    shapes, etc.
    Additional tests for classifiers, regressors, clustering or transformers
    will be run if the Estimator class inherits from the corresponding mixin
    from sklearn.base.

    Parameters
    ----------
    Estimator : class
        Class to check.

    """
    # ``Estimator`` is a class here, so its own __name__ is the estimator
    # name (``Estimator.__class__.__name__`` would name the metaclass)
    name = Estimator.__name__
    check_parameters_default_constructible(name, Estimator)
    for check in _yield_all_checks(name, Estimator):
        check(name, Estimator)


def _boston_subset(n_samples=200):
    global BOSTON
    if BOSTON is None:
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:n_samples], y[:n_samples]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON


def set_testing_parameters(estimator):
    # set parameters to speed up some estimators and
    # avoid deprecated behaviour
    params = estimator.get_params()
    if ("n_iter" in params
            and estimator.__class__.__name__ != "TSNE"):
        estimator.set_params(n_iter=5)
    if "max_iter" in params:
        warnings.simplefilter("ignore", ConvergenceWarning)
        if estimator.max_iter is not None:
            estimator.set_params(max_iter=min(5, estimator.max_iter))
        # LinearSVR
        if estimator.__class__.__name__ == 'LinearSVR':
            estimator.set_params(max_iter=20)
        # NMF
        if estimator.__class__.__name__ == 'NMF':
            estimator.set_params(max_iter=100)
    if "n_resampling" in params:
        # randomized lasso
        estimator.set_params(n_resampling=5)
    if "n_estimators" in params:
        # especially gradient boosting with default 100
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if "max_trials" in params:
        # RANSAC
        estimator.set_params(max_trials=10)
    if "n_init" in params:
        # K-Means
        estimator.set_params(n_init=2)
    if "decision_function_shape" in params:
        # SVC
        estimator.set_params(decision_function_shape='ovo')

    if estimator.__class__.__name__ == "SelectFdr":
        # be tolerant of noisy datasets (not actually speed)
        estimator.set_params(alpha=.5)

    if estimator.__class__.__name__ == "TheilSenRegressor":
        estimator.max_subpopulation = 100

    if isinstance(estimator, BaseRandomProjection):
        # Due to the jl lemma and often very few samples, the number
        # of components of the random matrix projection will be probably
        # greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        estimator.set_params(n_components=1)

    if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10,
        # which are more features than we have in most cases.
        estimator.set_params(k=1)

    if isinstance(estimator, NMF):
        if not isinstance(estimator, ProjectedGradientNMF):
            estimator.set_params(solver='cd')


class NotAnArray(object):
    """An object that is convertible to an array."""

    def __init__(self, data):
        self.data = data

    def __array__(self, dtype=None):
        return self.data


def _is_32bit():
    """Detect if process is 32bit Python."""
    return struct.calcsize('P') * 8 == 32


def check_estimator_sparse_data(name, Estimator):
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X_csr = sparse.csr_matrix(X)
    y = (4 * rng.rand(40)).astype(np.int)
    for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
        X = X_csr.asformat(sparse_format)
        # catch deprecation warnings
        with warnings.catch_warnings():
            if name in ['Scaler', 'StandardScaler']:
                estimator = Estimator(with_mean=False)
            else:
                estimator = Estimator()
        set_testing_parameters(estimator)
        # fit and predict
        try:
            estimator.fit(X, y)
            if hasattr(estimator, "predict"):
                pred = estimator.predict(X)
                assert_equal(pred.shape, (X.shape[0],))
            if hasattr(estimator, 'predict_proba'):
                probs = estimator.predict_proba(X)
                assert_equal(probs.shape, (X.shape[0], 4))
        except TypeError as e:
            if 'sparse' not in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: the error message should state "
                      "explicitly that sparse input is not supported if "
                      "this is not the case." % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise


def check_dtype_object(name, Estimator):
    # check that estimators treat dtype object as numeric if possible
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10).astype(object)
    y = (X[:, 0] * 4).astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    with warnings.catch_warnings():
        estimator = Estimator()
    set_testing_parameters(estimator)

    estimator.fit(X, y)
    if hasattr(estimator, "predict"):
        estimator.predict(X)

    if (hasattr(estimator, "transform") and
            name not in DEPRECATED_TRANSFORM):
        estimator.transform(X)

    try:
        estimator.fit(X, y.astype(object))
    except Exception as e:
        if "Unknown label type" not in str(e):
            raise

    X[0, 0] = {'foo': 'bar'}
    msg = "argument must be a string or a number"
    assert_raises_regex(TypeError, msg, estimator.fit, X, y)


@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
    # check by fitting a 2d array and predicting with a 1d array
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20, 3))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_testing_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    estimator.fit(X, y)

    for method in ["predict", "transform", "decision_function",
                   "predict_proba"]:
        if hasattr(estimator, method):
            try:
                assert_warns(DeprecationWarning,
                             getattr(estimator, method), X[0])
            except ValueError:
                pass


@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check fitting a 2d array with only one sample
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(1, 10))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_testing_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass
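
# Editor's sketch (not part of the module): ``NotAnArray`` above is how these
# checks feed array-like but non-ndarray data to estimators; ``np.asarray``
# converts it through the ``__array__`` protocol, as this tiny demo verifies.
def _notanarray_demo():
    data = np.arange(6).reshape(2, 3)
    assert_array_equal(np.asarray(NotAnArray(data)), data)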
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check fitting a 2d array with only one feature
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(10, 1))
    y = X[:, 0].astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_testing_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass


@ignore_warnings
def check_fit1d_1feature(name, Estimator):
    # check fitting a 1d array with one feature
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = X.astype(np.int)
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_testing_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass


@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d array with only one sample
    rnd = np.random.RandomState(0)
    X = 3 * rnd.uniform(size=(20))
    y = np.array([1])
    y = multioutput_estimator_convert_y_2d(name, y)
    estimator = Estimator()
    set_testing_parameters(estimator)
    if hasattr(estimator, "n_components"):
        estimator.n_components = 1
    if hasattr(estimator, "n_clusters"):
        estimator.n_clusters = 1

    set_random_state(estimator, 1)
    try:
        estimator.fit(X, y)
    except ValueError:
        pass


def check_transformer_general(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())


def check_transformer_data_not_an_array(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)


def check_transformers_unfitted(name, Transformer):
    X, y = _boston_subset()

    with warnings.catch_warnings(record=True):
        transformer = Transformer()

    assert_raises((AttributeError, ValueError), transformer.transform, X)


def _check_transformer(name, Transformer, X, y):
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only
        # depending on numpy & scipy and/or maybe generate a test dataset
        # that does not cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)
    n_samples, n_features = np.asarray(X).shape
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    set_random_state(transformer)
    set_testing_parameters(transformer)

    # fit

    if name in CROSS_DECOMPOSITION:
        y_ = np.c_[y, y]
        y_[::2, 1] *= 2
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit_transform(X, y=y_)
    if isinstance(X_pred, tuple):
        for x_pred in X_pred:
            assert_equal(x_pred.shape[0], n_samples)
    else:
        # check for consistent n_samples
        assert_equal(X_pred.shape[0], n_samples)

    if hasattr(transformer, 'transform'):
        if name in CROSS_DECOMPOSITION:
            X_pred2 = transformer.transform(X, y_)
            X_pred3 = transformer.fit_transform(X, y=y_)
        else:
            X_pred2 = transformer.transform(X)
            X_pred3 = transformer.fit_transform(X, y=y_)
        if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
            for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                assert_array_almost_equal(
                    x_pred, x_pred2, 2,
                    "fit_transform and transform outcomes not consistent in %s"
                    % Transformer)
                assert_array_almost_equal(
                    x_pred, x_pred3, 2,
                    "consecutive fit_transform outcomes not consistent in %s"
                    % Transformer)
        else:
            assert_array_almost_equal(
                X_pred, X_pred2, 2,
                "fit_transform and transform outcomes not consistent in %s"
                % Transformer)
            assert_array_almost_equal(
                X_pred, X_pred3, 2,
                "consecutive fit_transform outcomes not consistent in %s"
                % Transformer)
            assert_equal(len(X_pred2), n_samples)
            assert_equal(len(X_pred3), n_samples)

        # raises error on malformed input for transform
        if hasattr(X, 'T'):
            # If it's not an array, it does not have a 'T' property
            assert_raises(ValueError, transformer.transform, X.T)


@ignore_warnings
def check_pipeline_consistency(name, Estimator):
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only
        # depending on numpy & scipy and/or maybe generate a test dataset
        # that does not cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python' raise SkipTest(msg) # check that make_pipeline(est) gives same score as est X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) X -= X.min() y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) pipeline = make_pipeline(estimator) estimator.fit(X, y) pipeline.fit(X, y) if name in DEPRECATED_TRANSFORM: funcs = ["score"] else: funcs = ["score", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func_pipeline = getattr(pipeline, func_name) result = func(X, y) result_pipe = func_pipeline(X, y) assert_array_almost_equal(result, result_pipe) @ignore_warnings def check_fit_score_takes_y(name, Estimator): # check that all estimators accept an optional y # in fit and score so they can be used in pipelines rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 y = multioutput_estimator_convert_y_2d(name, y) estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) if name in DEPRECATED_TRANSFORM: funcs = ["fit", "score", "partial_fit", "fit_predict"] else: funcs = [ "fit", "score", "partial_fit", "fit_predict", "fit_transform"] for func_name in funcs: func = getattr(estimator, func_name, None) if func is not None: func(X, y) args = [p.name for p in signature(func).parameters.values()] assert_true(args[1] in ["y", "Y"], "Expected y or Y as second argument for method " "%s of %s. Got arguments: %r." % (func_name, Estimator.__name__, args)) @ignore_warnings def check_estimators_dtypes(name, Estimator): rnd = np.random.RandomState(0) X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32) X_train_64 = X_train_32.astype(np.float64) X_train_int_64 = X_train_32.astype(np.int64) X_train_int_32 = X_train_32.astype(np.int32) y = X_train_int_64[:, 0] y = multioutput_estimator_convert_y_2d(name, y) if name in DEPRECATED_TRANSFORM: methods = ["predict", "decision_function", "predict_proba"] else: methods = [ "predict", "transform", "decision_function", "predict_proba"] for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]: with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator, 1) estimator.fit(X_train, y) for method in methods: if hasattr(estimator, method): getattr(estimator, method)(X_train) def check_estimators_empty_data_messages(name, Estimator): e = Estimator() set_testing_parameters(e) set_random_state(e, 1) X_zero_samples = np.empty(0).reshape(0, 3) # The precise message can change depending on whether X or y is # validated first. Let us test the type of exception only: assert_raises(ValueError, e.fit, X_zero_samples, []) X_zero_features = np.empty(0).reshape(3, 0) # the following y should be accepted by both classifiers and regressors # and ignored by unsupervised models y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1])) msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required." 
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y) def check_estimators_nan_inf(name, Estimator): rnd = np.random.RandomState(0) X_train_finite = rnd.uniform(size=(10, 3)) X_train_nan = rnd.uniform(size=(10, 3)) X_train_nan[0, 0] = np.nan X_train_inf = rnd.uniform(size=(10, 3)) X_train_inf[0, 0] = np.inf y = np.ones(10) y[:5] = 0 y = multioutput_estimator_convert_y_2d(name, y) error_string_fit = "Estimator doesn't check for NaN and inf in fit." error_string_predict = ("Estimator doesn't check for NaN and inf in" " predict.") error_string_transform = ("Estimator doesn't check for NaN and inf in" " transform.") for X_train in [X_train_nan, X_train_inf]: # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator, 1) # try to fit try: estimator.fit(X_train, y) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_fit, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_fit, Estimator, exc) traceback.print_exc(file=sys.stdout) raise exc else: raise AssertionError(error_string_fit, Estimator) # actually fit estimator.fit(X_train_finite, y) # predict if hasattr(estimator, "predict"): try: estimator.predict(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_predict, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_predict, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_predict, Estimator) # transform if (hasattr(estimator, "transform") and name not in DEPRECATED_TRANSFORM): try: estimator.transform(X_train) except ValueError as e: if 'inf' not in repr(e) and 'NaN' not in repr(e): print(error_string_transform, Estimator, e) traceback.print_exc(file=sys.stdout) raise e except Exception as exc: print(error_string_transform, Estimator, exc) traceback.print_exc(file=sys.stdout) else: raise AssertionError(error_string_transform, Estimator) @ignore_warnings def check_estimators_pickle(name, Estimator): """Test that we can pickle all estimators""" if name in DEPRECATED_TRANSFORM: check_methods = ["predict", "decision_function", "predict_proba"] else: check_methods = ["predict", "transform", "decision_function", "predict_proba"] X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1) # some estimators can't do features less than 0 X -= X.min() # some estimators only take multioutputs y = multioutput_estimator_convert_y_2d(name, y) # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_random_state(estimator) set_testing_parameters(estimator) estimator.fit(X, y) result = dict() for method in check_methods: if hasattr(estimator, method): result[method] = getattr(estimator, method)(X) # pickle and unpickle! pickled_estimator = pickle.dumps(estimator) unpickled_estimator = pickle.loads(pickled_estimator) for method in result: unpickled_result = getattr(unpickled_estimator, method)(X) assert_array_almost_equal(result[method], unpickled_result) def check_estimators_partial_fit_n_features(name, Alg): # check if number of features changes between calls to partial_fit. 
    if not hasattr(Alg, 'partial_fit'):
        return
    X, y = make_blobs(n_samples=50, random_state=1)
    X -= X.min()
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_testing_parameters(alg)
        if isinstance(alg, ClassifierMixin):
            classes = np.unique(y)
            alg.partial_fit(X, y, classes=classes)
        else:
            alg.partial_fit(X, y)

    assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)


def check_clustering(name, Alg):
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
        set_testing_parameters(alg)
        if hasattr(alg, "n_clusters"):
            alg.set_params(n_clusters=3)
        set_random_state(alg)
        if name == 'AffinityPropagation':
            alg.set_params(preference=-100)
            alg.set_params(max_iter=100)

        # fit
        alg.fit(X)
        # with lists
        alg.fit(X.tolist())

        assert_equal(alg.labels_.shape, (n_samples,))
        pred = alg.labels_
        assert_greater(adjusted_rand_score(pred, y), 0.4)
        # fit another time with ``fit_predict`` and compare results
        if name == 'SpectralClustering':
            # there is no way to make Spectral clustering deterministic :(
            return
        set_random_state(alg)
        with warnings.catch_warnings(record=True):
            pred2 = alg.fit_predict(X)
        assert_array_equal(pred, pred2)


def check_clusterer_compute_labels_predict(name, Clusterer):
    """Check that predict is invariant to compute_labels"""
    X, y = make_blobs(n_samples=20, random_state=0)
    clusterer = Clusterer()

    if hasattr(clusterer, "compute_labels"):
        # MiniBatchKMeans
        if hasattr(clusterer, "random_state"):
            clusterer.set_params(random_state=0)

        X_pred1 = clusterer.fit(X).predict(X)
        clusterer.set_params(compute_labels=False)
        X_pred2 = clusterer.fit(X).predict(X)
        assert_array_equal(X_pred1, X_pred2)


def check_classifiers_one_label(name, Classifier):
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    y = np.ones(10)
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
        set_testing_parameters(classifier)
        # try to fit
        try:
            classifier.fit(X_train, y)
        except ValueError as e:
            if 'class' not in repr(e):
                print(error_string_fit, Classifier, e)
                traceback.print_exc(file=sys.stdout)
                raise e
            else:
                return
        except Exception as exc:
            print(error_string_fit, Classifier, exc)
            traceback.print_exc(file=sys.stdout)
            raise exc
        # predict
        try:
            assert_array_equal(classifier.predict(X_test), y)
        except Exception as exc:
            print(error_string_predict, Classifier, exc)
            raise exc


@ignore_warnings  # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
    X_m, y_m = make_blobs(n_samples=300, random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # catch deprecation warnings
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name in ['BernoulliNB', 'MultinomialNB']:
            X -= X.min()
        set_testing_parameters(classifier)
        set_random_state(classifier)
        # raises error on malformed input for fit
        assert_raises(ValueError, classifier.fit, X, y[:-1])

        # fit
        classifier.fit(X, y)
        # with lists
        classifier.fit(X.tolist(), y.tolist())
        assert_true(hasattr(classifier, "classes_"))
        y_pred = classifier.predict(X)
        assert_equal(y_pred.shape, (n_samples,))
        # training set performance
        if name not in ['BernoulliNB', 'MultinomialNB']:
            assert_greater(accuracy_score(y, y_pred), 0.83)

        # raises error on malformed input for predict
        assert_raises(ValueError, classifier.predict, X.T)
        if hasattr(classifier, "decision_function"):
            try:
                # decision_function agrees with predict
                decision = classifier.decision_function(X)
                if n_classes == 2:
                    assert_equal(decision.shape, (n_samples,))
                    dec_pred = (decision.ravel() > 0).astype(np.int)
                    assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # 1on1 of LibSVM works differently
                    assert_equal(decision.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(decision, axis=1), y_pred)

                # raises error on malformed input for decision_function
                assert_raises(ValueError,
                              classifier.decision_function, X.T)
            except NotImplementedError:
                pass
        if hasattr(classifier, "predict_proba"):
            # predict_proba agrees with predict
            y_prob = classifier.predict_proba(X)
            assert_equal(y_prob.shape, (n_samples, n_classes))
            assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
            # check that probas for all classes sum to one
            assert_array_almost_equal(np.sum(y_prob, axis=1),
                                      np.ones(n_samples))
            # raises error on malformed input for predict_proba
            assert_raises(ValueError, classifier.predict_proba, X.T)


def check_estimators_fit_returns_self(name, Estimator):
    """Check if self is returned when calling fit"""
    X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()

    estimator = Estimator()
set_testing_parameters(estimator) set_random_state(estimator) assert_true(estimator.fit(X, y) is estimator) @ignore_warnings def check_estimators_unfitted(name, Estimator): """Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise either AttributeError or ValueError. The specific exception type NotFittedError inherits from both and can therefore be adequately raised for that purpose. """ # Common test for Regressors as well as Classifiers X, y = _boston_subset() with warnings.catch_warnings(record=True): est = Estimator() msg = "fit" if hasattr(est, 'predict'): assert_raise_message((AttributeError, ValueError), msg, est.predict, X) if hasattr(est, 'decision_function'): assert_raise_message((AttributeError, ValueError), msg, est.decision_function, X) if hasattr(est, 'predict_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_proba, X) if hasattr(est, 'predict_log_proba'): assert_raise_message((AttributeError, ValueError), msg, est.predict_log_proba, X) def check_supervised_y_2d(name, Estimator): if "MultiTask" in name: # These only work on 2d, so this test makes no sense return rnd = np.random.RandomState(0) X = rnd.uniform(size=(10, 3)) y = np.arange(10) % 3 # catch deprecation warnings with warnings.catch_warnings(record=True): estimator = Estimator() set_testing_parameters(estimator) set_random_state(estimator) # fit estimator.fit(X, y) y_pred = estimator.predict(X) set_random_state(estimator) # Check that when a 2D y is given, a DataConversionWarning is # raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always", DataConversionWarning) warnings.simplefilter("ignore", RuntimeWarning) estimator.fit(X, y[:, np.newaxis]) y_pred_2d = estimator.predict(X) msg = "expected 1 DataConversionWarning, got: %s" % ( ", ".join([str(w_x) for w_x in w])) if name not in MULTI_OUTPUT: # check that we warned if we don't support multi-output assert_greater(len(w), 0, msg) assert_true("DataConversionWarning('A column-vector y" " was passed when a 1d array was expected" in msg) assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel()) def check_classifiers_classes(name, Classifier): X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1) X, y = shuffle(X, y, random_state=7) X = StandardScaler().fit_transform(X) # We need to make sure that we have non negative data, for things # like NMF X -= X.min() - .1 y_names = np.array(["one", "two", "three"])[y] for y_names in [y_names, y_names.astype('O')]: if name in ["LabelPropagation", "LabelSpreading"]: # TODO some complication with -1 label y_ = y else: y_ = y_names classes = np.unique(y_) # catch deprecation warnings with warnings.catch_warnings(record=True): classifier = Classifier() if name == 'BernoulliNB': classifier.set_params(binarize=X.mean()) set_testing_parameters(classifier) set_random_state(classifier) # fit classifier.fit(X, y_) y_pred = classifier.predict(X) # training set performance assert_array_equal(np.unique(y_), np.unique(y_pred)) if np.any(classifier.classes_ != classes): print("Unexpected classes_ attribute for %r: " "expected %s, got %s" % (classifier, classes, classifier.classes_)) def check_regressors_int(name, Regressor): X, _ = _boston_subset() X = X[:50] rnd = np.random.RandomState(0) y = rnd.randint(3, size=X.shape[0]) y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds regressor_1 = 
Regressor() regressor_2 = Regressor() set_testing_parameters(regressor_1) set_testing_parameters(regressor_2) set_random_state(regressor_1) set_random_state(regressor_2) if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y # fit regressor_1.fit(X, y_) pred1 = regressor_1.predict(X) regressor_2.fit(X, y_.astype(np.float)) pred2 = regressor_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_regressors_train(name, Regressor): X, y = _boston_subset() y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled y = y.ravel() y = multioutput_estimator_convert_y_2d(name, y) rnd = np.random.RandomState(0) # catch deprecation warnings with warnings.catch_warnings(record=True): regressor = Regressor() set_testing_parameters(regressor) if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'): # linear regressors need to set alpha, but not generalized CV ones regressor.alpha = 0.01 if name == 'PassiveAggressiveRegressor': regressor.C = 0.01 # raises error on malformed input for fit assert_raises(ValueError, regressor.fit, X, y[:-1]) # fit if name in CROSS_DECOMPOSITION: y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))]) y_ = y_.T else: y_ = y set_random_state(regressor) regressor.fit(X, y_) regressor.fit(X.tolist(), y_.tolist()) y_pred = regressor.predict(X) assert_equal(y_pred.shape, y_.shape) # TODO: find out why PLS and CCA fail. RANSAC is random # and furthermore assumes the presence of outliers, hence # skipped if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'): print(regressor) assert_greater(regressor.score(X, y_), 0.5) @ignore_warnings def check_regressors_no_decision_function(name, Regressor): # checks whether regressors have decision_function or predict_proba rng = np.random.RandomState(0) X = rng.normal(size=(10, 4)) y = multioutput_estimator_convert_y_2d(name, X[:, 0]) regressor = Regressor() set_testing_parameters(regressor) if hasattr(regressor, "n_components"): # FIXME CCA, PLS is not robust to rank 1 effects regressor.n_components = 1 regressor.fit(X, y) funcs = ["decision_function", "predict_proba", "predict_log_proba"] for func_name in funcs: func = getattr(regressor, func_name, None) if func is None: # doesn't have function continue # has function. Should raise deprecation warning msg = func_name assert_warns_message(DeprecationWarning, msg, func, X) def check_class_weight_classifiers(name, Classifier): if name == "NuSVC": # the sparse version has a parameter that doesn't do anything raise SkipTest if name.endswith("NB"): # NaiveBayes classifiers have a somewhat different interface. # FIXME SOON! 
        raise SkipTest

    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                            test_size=.5,
                                                            random_state=0)
        n_centers = len(np.unique(y_train))

        if n_centers == 2:
            class_weight = {0: 1000, 1: 0.0001}
        else:
            class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}

        with warnings.catch_warnings(record=True):
            classifier = Classifier(class_weight=class_weight)
        if hasattr(classifier, "n_iter"):
            classifier.set_params(n_iter=100)
        if hasattr(classifier, "min_weight_fraction_leaf"):
            classifier.set_params(min_weight_fraction_leaf=0.01)

        set_random_state(classifier)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        assert_greater(np.mean(y_pred == 0), 0.89)


def check_class_weight_balanced_classifiers(name, Classifier, X_train,
                                            y_train, X_test, y_test, weights):
    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        classifier.set_params(n_iter=100)

    set_random_state(classifier)
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)

    classifier.set_params(class_weight='balanced')
    classifier.fit(X_train, y_train)
    y_pred_balanced = classifier.predict(X_test)
    assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
                   f1_score(y_test, y_pred, average='weighted'))


def check_class_weight_balanced_linear_classifier(name, Classifier):
    """Test class weights with non-contiguous class labels."""
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = np.array([1, 1, 1, -1, -1])

    with warnings.catch_warnings(record=True):
        classifier = Classifier()
    if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
        classifier.set_params(n_iter=1000)
    set_random_state(classifier)

    # Let the model compute the class frequencies
    classifier.set_params(class_weight='balanced')
    coef_balanced = classifier.fit(X, y).coef_.copy()

    # Count each label occurrence to reweight manually
    n_samples = len(y)
    n_classes = float(len(np.unique(y)))

    class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
                    -1: n_samples / (np.sum(y == -1) * n_classes)}
    classifier.set_params(class_weight=class_weight)
    coef_manual = classifier.fit(X, y).coef_.copy()

    assert_array_almost_equal(coef_balanced, coef_manual)


def check_estimators_overwrite_params(name, Estimator):
    X, y = make_blobs(random_state=0, n_samples=9)
    y = multioutput_estimator_convert_y_2d(name, y)
    # some want non-negative input
    X -= X.min()

    with warnings.catch_warnings(record=True):
        # catch deprecation warnings
        estimator = Estimator()

    set_testing_parameters(estimator)
    set_random_state(estimator)

    # Make a physical copy of the original estimator parameters before
    # fitting.
    params = estimator.get_params()
    original_params = deepcopy(params)

    # Fit the model
    estimator.fit(X, y)

    # Compare the state of the model parameters with the original parameters
    new_params = estimator.get_params()
    for param_name, original_value in original_params.items():
        new_value = new_params[param_name]

        # We should never change or mutate the internal state of input
        # parameters by default. To check this we use the joblib.hash function
        # that introspects recursively any subobjects to compute a checksum.
        # The only exception to this rule of immutable constructor parameters
        # is possible RandomState instance but in this check we explicitly
        # fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value), "Estimator %s should not change or mutate " " the parameter %s from %s to %s during fit." % (name, param_name, original_value, new_value)) def check_sparsify_coefficients(name, Estimator): X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-1, -2], [2, 2], [-2, -2]]) y = [1, 1, 1, 2, 2, 2, 3, 3, 3] est = Estimator() est.fit(X, y) pred_orig = est.predict(X) # test sparsify with dense inputs est.sparsify() assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) # pickle and unpickle with sparse coef_ est = pickle.loads(pickle.dumps(est)) assert_true(sparse.issparse(est.coef_)) pred = est.predict(X) assert_array_equal(pred, pred_orig) def check_classifier_data_not_an_array(name, Estimator): X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]]) y = [1, 1, 1, 2, 2, 2] y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_regressor_data_not_an_array(name, Estimator): X, y = _boston_subset(n_samples=50) y = multioutput_estimator_convert_y_2d(name, y) check_estimators_data_not_an_array(name, Estimator, X, y) def check_estimators_data_not_an_array(name, Estimator, X, y): if name in CROSS_DECOMPOSITION: raise SkipTest # catch deprecation warnings with warnings.catch_warnings(record=True): # separate estimators to control random seeds estimator_1 = Estimator() estimator_2 = Estimator() set_testing_parameters(estimator_1) set_testing_parameters(estimator_2) set_random_state(estimator_1) set_random_state(estimator_2) y_ = NotAnArray(np.asarray(y)) X_ = NotAnArray(np.asarray(X)) # fit estimator_1.fit(X_, y_) pred1 = estimator_1.predict(X_) estimator_2.fit(X, y) pred2 = estimator_2.predict(X) assert_array_almost_equal(pred1, pred2, 2, name) def check_parameters_default_constructible(name, Estimator): classifier = LinearDiscriminantAnalysis() # test default-constructibility # get rid of deprecation warnings with warnings.catch_warnings(record=True): if name in META_ESTIMATORS: estimator = Estimator(classifier) else: estimator = Estimator() # test cloning clone(estimator) # test __repr__ repr(estimator) # test that set_params returns self assert_true(estimator.set_params() is estimator) # test if init does nothing but set parameters # this is important for grid_search etc. # We get the default parameters from init and then # compare these against the actual values of the attributes. # this comes from getattr. Gets rid of deprecation decorator. init = getattr(estimator.__init__, 'deprecated_original', estimator.__init__) try: def param_filter(p): """Identify hyper parameters of an estimator""" return (p.name != 'self' and p.kind != p.VAR_KEYWORD and p.kind != p.VAR_POSITIONAL) init_params = [p for p in signature(init).parameters.values() if param_filter(p)] except (TypeError, ValueError): # init is not a python function. 
            # true for mixins
            return
        params = estimator.get_params()
        if name in META_ESTIMATORS:
            # they can need a non-default argument
            init_params = init_params[1:]

        for init_param in init_params:
            assert_not_equal(init_param.default, init_param.empty,
                             "parameter %s for %s has no default value"
                             % (init_param.name, type(estimator).__name__))
            assert_in(type(init_param.default),
                      [str, int, float, bool, tuple, type(None),
                       np.float64, types.FunctionType, Memory])
            if init_param.name not in params.keys():
                # deprecated parameter, not in get_params
                assert_true(init_param.default is None)
                continue

            param_value = params[init_param.name]
            if isinstance(param_value, np.ndarray):
                assert_array_equal(param_value, init_param.default)
            else:
                assert_equal(param_value, init_param.default)


def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
    if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
                 'MultiTaskLasso', 'MultiTaskElasticNet']):
        return y[:, np.newaxis]
    return y


def check_non_transformer_estimators_n_iter(name, estimator,
                                            multi_output=False):
    # Check that all iterative solvers run for more than one iteration.
    iris = load_iris()
    X, y_ = iris.data, iris.target

    if multi_output:
        y_ = y_[:, np.newaxis]

    set_random_state(estimator, 0)
    if name == 'AffinityPropagation':
        estimator.fit(X)
    else:
        estimator.fit(X, y_)
    assert_greater(estimator.n_iter_, 0)


def check_transformer_n_iter(name, estimator):
    if name in CROSS_DECOMPOSITION:
        # Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
        y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]

    else:
        X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                           random_state=0, n_features=2, cluster_std=0.1)
        X -= X.min() - 0.1
    set_random_state(estimator, 0)
    estimator.fit(X, y_)

    # These return one n_iter per component.
    if name in CROSS_DECOMPOSITION:
        for iter_ in estimator.n_iter_:
            assert_greater(iter_, 1)
    else:
        assert_greater(estimator.n_iter_, 1)


def check_get_params_invariance(name, estimator):
    class T(BaseEstimator):
        """Mock classifier"""

        def __init__(self):
            pass

        def fit(self, X, y):
            return self

    if name in ('FeatureUnion', 'Pipeline'):
        e = estimator([('clf', T())])

    elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
        return

    else:
        e = estimator()

    shallow_params = e.get_params(deep=False)
    deep_params = e.get_params(deep=True)

    assert_true(all(item in deep_params.items() for
                    item in shallow_params.items()))
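

if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: drive a couple of
    # the checks above by hand on a few classifiers.  ``all_estimators`` is
    # assumed to be importable from sklearn.utils.testing, as it was in
    # scikit-learn releases of this era.
    from sklearn.utils.testing import all_estimators

    for clf_name, ClfClass in all_estimators(type_filter='classifier')[:3]:
        # each check raises an AssertionError when the estimator misbehaves
        check_fit_score_takes_y(clf_name, ClfClass)
        check_estimators_dtypes(clf_name, ClfClass)
        print("%s passed the sampled checks" % clf_name)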
bsd-3-clause
hrjn/scikit-learn
examples/cluster/plot_birch_vs_minibatchkmeans.py
333
3694
""" ================================= Compare BIRCH and MiniBatchKMeans ================================= This example compares the timing of Birch (with and without the global clustering step) and MiniBatchKMeans on a synthetic dataset having 100,000 samples and 2 features generated using make_blobs. If ``n_clusters`` is set to None, the data is reduced from 100,000 samples to a set of 158 clusters. This can be viewed as a preprocessing step before the final (global) clustering step that further reduces these 158 clusters to 100 clusters. """ # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com # Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # License: BSD 3 clause print(__doc__) from itertools import cycle from time import time import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors from sklearn.preprocessing import StandardScaler from sklearn.cluster import Birch, MiniBatchKMeans from sklearn.datasets.samples_generator import make_blobs # Generate centers for the blobs so that it forms a 10 X 10 grid. xx = np.linspace(-22, 22, 10) yy = np.linspace(-22, 22, 10) xx, yy = np.meshgrid(xx, yy) n_centres = np.hstack((np.ravel(xx)[:, np.newaxis], np.ravel(yy)[:, np.newaxis])) # Generate blobs to do a comparison between MiniBatchKMeans and Birch. X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0) # Use all colors that matplotlib provides by default. colors_ = cycle(colors.cnames.keys()) fig = plt.figure(figsize=(12, 4)) fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9) # Compute clustering with Birch with and without the final clustering step # and plot. birch_models = [Birch(threshold=1.7, n_clusters=None), Birch(threshold=1.7, n_clusters=100)] final_step = ['without global clustering', 'with global clustering'] for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)): t = time() birch_model.fit(X) time_ = time() - t print("Birch %s as the final step took %0.2f seconds" % ( info, (time() - t))) # Plot result labels = birch_model.labels_ centroids = birch_model.subcluster_centers_ n_clusters = np.unique(labels).size print("n_clusters : %d" % n_clusters) ax = fig.add_subplot(1, 3, ind + 1) for this_centroid, k, col in zip(centroids, range(n_clusters), colors_): mask = labels == k ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.') if birch_model.n_clusters is None: ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col, markeredgecolor='k', markersize=5) ax.set_ylim([-25, 25]) ax.set_xlim([-25, 25]) ax.set_autoscaley_on(False) ax.set_title('Birch %s' % info) # Compute clustering with MiniBatchKMeans. mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100, n_init=10, max_no_improvement=10, verbose=0, random_state=0) t0 = time() mbk.fit(X) t_mini_batch = time() - t0 print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch) mbk_means_labels_unique = np.unique(mbk.labels_) ax = fig.add_subplot(1, 3, 3) for this_centroid, k, col in zip(mbk.cluster_centers_, range(n_clusters), colors_): mask = mbk.labels_ == k ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.') ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k', markersize=5) ax.set_xlim([-25, 25]) ax.set_ylim([-25, 25]) ax.set_title("MiniBatchKMeans") ax.set_autoscaley_on(False) plt.show()
bsd-3-clause
liberatorqjw/scikit-learn
sklearn/tests/test_multiclass.py
8
21910
import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_greater from sklearn.multiclass import OneVsRestClassifier from sklearn.multiclass import OneVsOneClassifier from sklearn.multiclass import OutputCodeClassifier from sklearn.multiclass import fit_ovr from sklearn.multiclass import fit_ovo from sklearn.multiclass import fit_ecoc from sklearn.multiclass import predict_ovr from sklearn.multiclass import predict_ovo from sklearn.multiclass import predict_ecoc from sklearn.multiclass import predict_proba_ovr from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.preprocessing import LabelBinarizer from sklearn.svm import LinearSVC, SVC from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import (LinearRegression, Lasso, ElasticNet, Ridge, Perceptron, LogisticRegression) from sklearn.tree import DecisionTreeClassifier from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline from sklearn import svm from sklearn import datasets from sklearn.externals.six.moves import zip iris = datasets.load_iris() rng = np.random.RandomState(0) perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] n_classes = 3 def test_ovr_exceptions(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovr.predict, []) with ignore_warnings(): assert_raises(ValueError, predict_ovr, [LinearSVC(), MultinomialNB()], LabelBinarizer(), []) # Fail on multioutput data assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1, 2], [3, 1]])) assert_raises(ValueError, OneVsRestClassifier(MultinomialNB()).fit, np.array([[1, 0], [0, 1]]), np.array([[1.5, 2.4], [3.1, 0.8]])) def test_ovr_fit_predict(): # A classifier which implements decision_function. ovr = OneVsRestClassifier(LinearSVC(random_state=0)) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovr.estimators_), n_classes) clf = LinearSVC(random_state=0) pred2 = clf.fit(iris.data, iris.target).predict(iris.data) assert_equal(np.mean(iris.target == pred), np.mean(iris.target == pred2)) # A classifier which implements predict_proba. 
ovr = OneVsRestClassifier(MultinomialNB()) pred = ovr.fit(iris.data, iris.target).predict(iris.data) assert_greater(np.mean(iris.target == pred), 0.65) def test_ovr_fit_predict_sparse(): for sparse in [sp.csr_matrix, sp.csc_matrix, sp.coo_matrix, sp.dok_matrix, sp.lil_matrix]: base_clf = MultinomialNB(alpha=1) X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) clf_sprs = OneVsRestClassifier(base_clf).fit(X_train, sparse(Y_train)) Y_pred_sprs = clf_sprs.predict(X_test) assert_true(clf.multilabel_) assert_true(sp.issparse(Y_pred_sprs)) assert_array_equal(Y_pred_sprs.toarray(), Y_pred) # Test predict_proba Y_proba = clf_sprs.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred_sprs.toarray()) # Test decision_function clf_sprs = OneVsRestClassifier(svm.SVC()).fit(X_train, sparse(Y_train)) dec_pred = (clf_sprs.decision_function(X_test) > 0).astype(int) assert_array_equal(dec_pred, clf_sprs.predict(X_test).toarray()) def test_ovr_always_present(): """Test that ovr works with classes that are always present or absent.""" # Note: tests is the case where _ConstantPredictor is utilised X = np.ones((10, 2)) X[:5, :] = 0 # Build an indicator matrix where two features are always on. # As list of lists, it would be: [[int(i >= 5), 2, 3] for i in range(10)] y = np.zeros((10, 3)) y[5:, 0] = 1 y[:, 1] = 1 y[:, 2] = 1 ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict(X) assert_array_equal(np.array(y_pred), np.array(y)) y_pred = ovr.decision_function(X) assert_equal(np.unique(y_pred[:, -2:]), 1) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.ones(X.shape[0])) # y has a constantly absent label y = np.zeros((10, 2)) y[5:, 0] = 1 # variable label ovr = OneVsRestClassifier(LogisticRegression()) assert_warns(UserWarning, ovr.fit, X, y) y_pred = ovr.predict_proba(X) assert_array_equal(y_pred[:, -1], np.zeros(X.shape[0])) def test_ovr_multiclass(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "ham", "eggs", "ham"] Y = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 0, 4]])[0] assert_array_equal(y_pred, [0, 0, 1]) def test_ovr_binary(): # Toy dataset where features correspond directly to labels. 
X = np.array([[0, 0, 5], [0, 5, 0], [3, 0, 0], [0, 0, 6], [6, 0, 0]]) y = ["eggs", "spam", "spam", "eggs", "spam"] Y = np.array([[0, 1, 1, 0, 1]]).T classes = set("eggs spam".split()) def conduct_test(base_clf, test_predict_proba=False): clf = OneVsRestClassifier(base_clf).fit(X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict(np.array([[0, 0, 4]]))[0] assert_equal(set(y_pred), set("eggs")) if test_predict_proba: X_test = np.array([[0, 0, 4]]) probabilities = clf.predict_proba(X_test) assert_equal(2, len(probabilities[0])) assert_equal(clf.classes_[np.argmax(probabilities, axis=1)], clf.predict(X_test)) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[3, 0, 0]])[0] assert_equal(y_pred, 1) for base_clf in (LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet()): conduct_test(base_clf) for base_clf in (MultinomialNB(), SVC(probability=True), LogisticRegression()): conduct_test(base_clf, test_predict_proba=True) @ignore_warnings def test_ovr_multilabel(): # Toy dataset where features correspond directly to labels. X = np.array([[0, 4, 5], [0, 5, 0], [3, 3, 3], [4, 0, 6], [6, 0, 0]]) y = [["spam", "eggs"], ["spam"], ["ham", "eggs", "spam"], ["ham", "eggs"], ["ham"]] # y = [[1, 2], [1], [0, 1, 2], [0, 2], [0]] Y = np.array([[0, 1, 1], [0, 1, 0], [1, 1, 1], [1, 0, 1], [1, 0, 0]]) classes = set("ham eggs spam".split()) for base_clf in (MultinomialNB(), LinearSVC(random_state=0), LinearRegression(), Ridge(), ElasticNet(), Lasso(alpha=0.5)): # test input as lists of tuples clf = assert_warns(DeprecationWarning, OneVsRestClassifier(base_clf).fit, X, y) assert_equal(set(clf.classes_), classes) y_pred = clf.predict([[0, 4, 4]])[0] assert_equal(set(y_pred), set(["spam", "eggs"])) assert_true(clf.multilabel_) # test input as label indicator matrix clf = OneVsRestClassifier(base_clf).fit(X, Y) y_pred = clf.predict([[0, 4, 4]])[0] assert_array_equal(y_pred, [0, 1, 1]) assert_true(clf.multilabel_) def test_ovr_fit_predict_svc(): ovr = OneVsRestClassifier(svm.SVC()) ovr.fit(iris.data, iris.target) assert_equal(len(ovr.estimators_), 3) assert_greater(ovr.score(iris.data, iris.target), .9) def test_ovr_multilabel_dataset(): base_clf = MultinomialNB(alpha=1) for au, prec, recall in zip((True, False), (0.51, 0.66), (0.51, 0.80)): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) Y_pred = clf.predict(X_test) assert_true(clf.multilabel_) assert_almost_equal(precision_score(Y_test, Y_pred, average="micro"), prec, decimal=2) assert_almost_equal(recall_score(Y_test, Y_pred, average="micro"), recall, decimal=2) def test_ovr_multilabel_predict_proba(): base_clf = MultinomialNB(alpha=1) for au in (False, True): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=au, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) # Estimator with predict_proba disabled, depending on parameters. 
decision_only = OneVsRestClassifier(svm.SVC(probability=False)) decision_only.fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = Y_proba > .5 assert_array_equal(pred, Y_pred) def test_ovr_single_label_predict_proba(): base_clf = MultinomialNB(alpha=1) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(base_clf).fit(X_train, Y_train) # decision function only estimator. Fails in current implementation. decision_only = OneVsRestClassifier(svm.SVR()).fit(X_train, Y_train) assert_raises(AttributeError, decision_only.predict_proba, X_test) Y_pred = clf.predict(X_test) Y_proba = clf.predict_proba(X_test) assert_almost_equal(Y_proba.sum(axis=1), 1.0) # predict assigns a label if the probability that the # sample has the label is greater than 0.5. pred = np.array([l.argmax() for l in Y_proba]) assert_false((pred - Y_pred).any()) def test_ovr_multilabel_decision_function(): X, Y = datasets.make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=3, length=50, allow_unlabeled=True, return_indicator=True, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal((clf.decision_function(X_test) > 0).astype(int), clf.predict(X_test)) def test_ovr_single_label_decision_function(): X, Y = datasets.make_classification(n_samples=100, n_features=20, random_state=0) X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] clf = OneVsRestClassifier(svm.SVC()).fit(X_train, Y_train) assert_array_equal(clf.decision_function(X_test).ravel() > 0, clf.predict(X_test)) def test_ovr_gridsearch(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovr, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovr_pipeline(): # Test with pipeline of length one # This test is needed because the multiclass estimators may fail to detect # the presence of predict_proba or decision_function. clf = Pipeline([("tree", DecisionTreeClassifier())]) ovr_pipe = OneVsRestClassifier(clf) ovr_pipe.fit(iris.data, iris.target) ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_array_equal(ovr.predict(iris.data), ovr_pipe.predict(iris.data)) def test_ovr_coef_(): ovr = OneVsRestClassifier(LinearSVC(random_state=0)) ovr.fit(iris.data, iris.target) shape = ovr.coef_.shape assert_equal(shape[0], n_classes) assert_equal(shape[1], iris.data.shape[1]) def test_ovr_coef_exceptions(): # Not fitted exception! ovr = OneVsRestClassifier(LinearSVC(random_state=0)) # lambda is needed because we don't want coef_ to be evaluated right away assert_raises(ValueError, lambda x: ovr.coef_, None) # Doesn't have coef_ exception! ovr = OneVsRestClassifier(DecisionTreeClassifier()) ovr.fit(iris.data, iris.target) assert_raises(AttributeError, lambda x: ovr.coef_, None) def test_ovo_exceptions(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ovo.predict, []) def test_ovo_fit_predict(): # A classifier which implements decision_function. 
ovo = OneVsOneClassifier(LinearSVC(random_state=0)) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) # A classifier which implements predict_proba. ovo = OneVsOneClassifier(MultinomialNB()) ovo.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ovo.estimators_), n_classes * (n_classes - 1) / 2) def test_ovo_gridsearch(): ovo = OneVsOneClassifier(LinearSVC(random_state=0)) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ovo, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) def test_ovo_ties(): # test that ties are broken using the decision function, not defaulting to # the smallest label X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y = np.array([2, 0, 1, 2]) multi_clf = OneVsOneClassifier(Perceptron()) ovo_prediction = multi_clf.fit(X, y).predict(X) # recalculate votes to make sure we have a tie predictions = np.vstack([clf.predict(X) for clf in multi_clf.estimators_]) scores = np.vstack([clf.decision_function(X) for clf in multi_clf.estimators_]) # classifiers are in order 0-1, 0-2, 1-2 # aggregate votes: votes = np.zeros((4, 3)) votes[np.arange(4), predictions[0]] += 1 votes[np.arange(4), 2 * predictions[1]] += 1 votes[np.arange(4), 1 + predictions[2]] += 1 # for the first point, there is one vote per class assert_array_equal(votes[0, :], 1) # for the rest, there is no tie and the prediction is the argmax assert_array_equal(np.argmax(votes[1:], axis=1), ovo_prediction[1:]) # for the tie, the prediction is the class with the highest score assert_equal(ovo_prediction[0], 0) # in the zero-one classifier, the score for 0 is greater than the score for # one. assert_greater(scores[0][0], scores[0][1]) # score for one is greater than score for zero assert_greater(scores[2, 0] - scores[0, 0], scores[0, 0] + scores[1, 0]) # score for one is greater than score for two assert_greater(scores[2, 0] - scores[0, 0], -scores[1, 0] - scores[2, 0]) def test_ovo_ties2(): # test that ties can not only be won by the first two labels X = np.array([[1, 2], [2, 1], [-2, 1], [-2, -1]]) y_ref = np.array([2, 0, 1, 2]) # cycle through labels so that each label wins once for i in range(3): y = (y_ref + i) % 3 multi_clf = OneVsOneClassifier(Perceptron()) ovo_prediction = multi_clf.fit(X, y).predict(X) assert_equal(ovo_prediction[0], i % 3) def test_ovo_string_y(): "Test that the OvO doesn't screw the encoding of string labels" X = np.eye(4) y = np.array(['a', 'b', 'c', 'd']) svc = LinearSVC() ovo = OneVsOneClassifier(svc) ovo.fit(X, y) assert_array_equal(y, ovo.predict(X)) def test_ecoc_exceptions(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0)) assert_raises(ValueError, ecoc.predict, []) def test_ecoc_fit_predict(): # A classifier which implements decision_function. ecoc = OutputCodeClassifier(LinearSVC(random_state=0), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) # A classifier which implements predict_proba. 
ecoc = OutputCodeClassifier(MultinomialNB(), code_size=2, random_state=0) ecoc.fit(iris.data, iris.target).predict(iris.data) assert_equal(len(ecoc.estimators_), n_classes * 2) def test_ecoc_gridsearch(): ecoc = OutputCodeClassifier(LinearSVC(random_state=0), random_state=0) Cs = [0.1, 0.5, 0.8] cv = GridSearchCV(ecoc, {'estimator__C': Cs}) cv.fit(iris.data, iris.target) best_C = cv.best_estimator_.estimators_[0].C assert_true(best_C in Cs) @ignore_warnings def test_deprecated(): base_estimator = DecisionTreeClassifier(random_state=0) X, Y = iris.data, iris.target X_train, Y_train = X[:80], Y[:80] X_test, Y_test = X[80:], Y[80:] all_metas = [ (OneVsRestClassifier, fit_ovr, predict_ovr, predict_proba_ovr), (OneVsOneClassifier, fit_ovo, predict_ovo, None), (OutputCodeClassifier, fit_ecoc, predict_ecoc, None), ] for MetaEst, fit_func, predict_func, proba_func in all_metas: try: meta_est = MetaEst(base_estimator, random_state=0).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train, random_state=0) except TypeError: meta_est = MetaEst(base_estimator).fit(X_train, Y_train) fitted_return = fit_func(base_estimator, X_train, Y_train) if len(fitted_return) == 2: estimators_, classes_or_lb = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, X_test), meta_est.predict(X_test)) if proba_func is not None: assert_almost_equal(proba_func(estimators_, X_test, is_multilabel=False), meta_est.predict_proba(X_test)) else: estimators_, classes_or_lb, codebook = fitted_return assert_almost_equal(predict_func(estimators_, classes_or_lb, codebook, X_test), meta_est.predict(X_test)) if __name__ == "__main__": import nose nose.runmodule()
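
# Hedged aside (not part of the original test module): the estimator counts
# asserted above follow directly from each reduction scheme.  For the
# three-class iris problem used here:
#
#     OvR fits one classifier per class:            3 estimators
#     OvO fits one per unordered pair of classes:   3 * (3 - 1) / 2 = 3
#     ECOC with code_size=2 fits n_classes * 2:     6 estimators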
bsd-3-clause
liyu1990/sklearn
examples/manifold/plot_swissroll.py
330
1446
""" =================================== Swiss Roll reduction with LLE =================================== An illustration of Swiss Roll reduction with locally linear embedding """ # Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr> # License: BSD 3 clause (C) INRIA 2011 print(__doc__) import matplotlib.pyplot as plt # This import is needed to modify the way figure behaves from mpl_toolkits.mplot3d import Axes3D Axes3D #---------------------------------------------------------------------- # Locally linear embedding of the swiss roll from sklearn import manifold, datasets X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500) print("Computing LLE embedding") X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12, n_components=2) print("Done. Reconstruction error: %g" % err) #---------------------------------------------------------------------- # Plot result fig = plt.figure() try: # compatibility matplotlib < 1.0 ax = fig.add_subplot(211, projection='3d') ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral) except: ax = fig.add_subplot(211) ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral) ax.set_title("Original data") ax = fig.add_subplot(212) ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral) plt.axis('tight') plt.xticks([]), plt.yticks([]) plt.title('Projected data') plt.show()
bsd-3-clause
hmendozap/master-arbeit-projects
autosk_dev_test/component/LinReg.py
1
8756
import numpy as np import scipy.sparse as sp from HPOlibConfigSpace.configuration_space import ConfigurationSpace from HPOlibConfigSpace.conditions import EqualsCondition, InCondition from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \ UniformIntegerHyperparameter, CategoricalHyperparameter, Constant from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm from autosklearn.pipeline.constants import * class LinReg(AutoSklearnRegressionAlgorithm): def __init__(self, number_updates, batch_size, dropout_output, learning_rate, solver, lambda2, momentum=0.99, beta1=0.9, beta2=0.9, rho=0.95, lr_policy='fixed', gamma=0.01, power=1.0, epoch_step=2, random_state=None): self.number_updates = number_updates self.batch_size = batch_size self.dropout_output = dropout_output self.learning_rate = learning_rate self.lr_policy = lr_policy self.lambda2 = lambda2 self.momentum = momentum self.beta1 = 1-beta1 if beta1 is not None else 0.9 self.beta2 = 1-beta2 if beta2 is not None else 0.99 self.rho = rho self.solver = solver self.gamma = gamma self.power = power self.epoch_step = epoch_step # Empty features and shape self.n_features = None self.input_shape = None self.m_issparse = False self.m_isregression = True self.m_isbinary = False self.m_ismultilabel = False self.estimator = None def _prefit(self, X, y): self.batch_size = int(self.batch_size) self.n_features = X.shape[1] self.input_shape = (self.batch_size, self.n_features) self.num_output_units = 1 # Regression # Normalize the output self.mean_y = np.mean(y) self.std_y = np.std(y) y = (y - self.mean_y) / self.std_y if len(y.shape) == 1: y = y[:, np.newaxis] self.m_issparse = sp.issparse(X) return X, y def fit(self, X, y): Xf, yf = self._prefit(X, y) epoch = (self.number_updates * self.batch_size)//X.shape[0] number_epochs = min(max(2, epoch), 110) # Cap the max number of possible epochs from implementation import LogisticRegression self.estimator = LogisticRegression.LogisticRegression(batch_size=self.batch_size, input_shape=self.input_shape, num_output_units=self.num_output_units, dropout_output=self.dropout_output, learning_rate=self.learning_rate, lr_policy=self.lr_policy, lambda2=self.lambda2, momentum=self.momentum, beta1=self.beta1, beta2=self.beta2, rho=self.rho, solver=self.solver, num_epochs=number_epochs, gamma=self.gamma, power=self.power, epoch_step=self.epoch_step, is_sparse=self.m_issparse, is_binary=self.m_isbinary, is_multilabel=self.m_ismultilabel, is_regression=self.m_isregression) self.estimator.fit(Xf, yf) return self def predict(self, X): if self.estimator is None: raise NotImplementedError preds = self.estimator.predict(X, self.m_issparse) return preds * self.std_y + self.mean_y def predict_proba(self, X): if self.estimator is None: raise NotImplementedError() return self.estimator.predict_proba(X, self.m_issparse) @staticmethod def get_properties(dataset_properties=None): return {'shortname': 'lin_reg', 'name': 'Linear Regression', 'handles_regression': True, 'handles_classification': False, 'handles_multiclass': False, 'handles_multilabel': False, 'is_deterministic': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (PREDICTIONS,)} @staticmethod def get_hyperparameter_search_space(dataset_properties=None): policy_choices = ['fixed', 'inv', 'exp', 'step'] batch_size = UniformIntegerHyperparameter("batch_size", 100, 3000, log=True, default=150) number_updates = UniformIntegerHyperparameter("number_updates", 500, 10500, log=True, default=500) dropout_output = 
UniformFloatHyperparameter("dropout_output", 0.0, 0.99, default=0.5) lr = UniformFloatHyperparameter("learning_rate", 1e-6, 0.1, log=True, default=0.01) l2 = UniformFloatHyperparameter("lambda2", 1e-6, 1e-2, log=True, default=1e-3) solver = CategoricalHyperparameter(name="solver", choices=["sgd", "adam"], default="sgd") beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1, log=True, default=0.1) beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1, log=True, default=0.01) lr_policy = CategoricalHyperparameter(name="lr_policy", choices=policy_choices, default='fixed') gamma = UniformFloatHyperparameter(name="gamma", lower=1e-3, upper=1e-1, default=1e-2) power = UniformFloatHyperparameter("power", 0.0, 1.0, default=0.5) epoch_step = UniformIntegerHyperparameter("epoch_step", 2, 20, default=5) cs = ConfigurationSpace() cs.add_hyperparameter(number_updates) cs.add_hyperparameter(batch_size) cs.add_hyperparameter(dropout_output) cs.add_hyperparameter(lr) cs.add_hyperparameter(l2) cs.add_hyperparameter(solver) cs.add_hyperparameter(beta1) cs.add_hyperparameter(beta2) cs.add_hyperparameter(lr_policy) cs.add_hyperparameter(gamma) cs.add_hyperparameter(power) cs.add_hyperparameter(epoch_step) beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam") beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam") gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy, values=['inv', 'exp', 'step']) power_depends_on_policy = EqualsCondition(power, lr_policy, 'inv') epoch_step_depends_on_policy = EqualsCondition(epoch_step, lr_policy, 'step') cs.add_condition(beta1_depends_on_solver) cs.add_condition(beta2_depends_on_solver) cs.add_condition(gamma_depends_on_policy) cs.add_condition(power_depends_on_policy) cs.add_condition(epoch_step_depends_on_policy) return cs
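

# Hedged usage sketch (not part of the original component): drawing one random
# configuration from the space above and instantiating the regressor with it.
# ``sample_configuration`` and ``get_dictionary`` are assumed to behave as in
# the ConfigSpace/HPOlibConfigSpace packages of this era, and the training
# data names below are stand-ins.
#
#     cs = LinReg.get_hyperparameter_search_space()
#     config = cs.sample_configuration()
#     model = LinReg(**config.get_dictionary())
#     model.fit(X_train, y_train)
#     predictions = model.predict(X_test)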
mit
PrashntS/scikit-learn
sklearn/linear_model/ridge.py
60
44642
""" Ridge regression """ # Author: Mathieu Blondel <mathieu@mblondel.org> # Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com> # Fabian Pedregosa <fabian@fseoane.net> # Michael Eickenberg <michael.eickenberg@nsup.org> # License: BSD 3 clause from abc import ABCMeta, abstractmethod import warnings import numpy as np from scipy import linalg from scipy import sparse from scipy.sparse import linalg as sp_linalg from .base import LinearClassifierMixin, LinearModel, _rescale_data from .sag import sag_solver from .sag_fast import get_max_squared_sum from ..base import RegressorMixin from ..utils.extmath import safe_sparse_dot from ..utils import check_X_y from ..utils import check_array from ..utils import check_consistent_length from ..utils import compute_sample_weight from ..utils import column_or_1d from ..preprocessing import LabelBinarizer from ..grid_search import GridSearchCV from ..externals import six from ..metrics.scorer import check_scoring def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0): n_samples, n_features = X.shape X1 = sp_linalg.aslinearoperator(X) coefs = np.empty((y.shape[1], n_features)) if n_features > n_samples: def create_mv(curr_alpha): def _mv(x): return X1.matvec(X1.rmatvec(x)) + curr_alpha * x return _mv else: def create_mv(curr_alpha): def _mv(x): return X1.rmatvec(X1.matvec(x)) + curr_alpha * x return _mv for i in range(y.shape[1]): y_column = y[:, i] mv = create_mv(alpha[i]) if n_features > n_samples: # kernel ridge # w = X.T * inv(X X^t + alpha*Id) y C = sp_linalg.LinearOperator( (n_samples, n_samples), matvec=mv, dtype=X.dtype) coef, info = sp_linalg.cg(C, y_column, tol=tol) coefs[i] = X1.rmatvec(coef) else: # linear ridge # w = inv(X^t X + alpha*Id) * X.T y y_column = X1.rmatvec(y_column) C = sp_linalg.LinearOperator( (n_features, n_features), matvec=mv, dtype=X.dtype) coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter, tol=tol) if info < 0: raise ValueError("Failed with error code %d" % info) if max_iter is None and info > 0 and verbose: warnings.warn("sparse_cg did not converge after %d iterations." % info) return coefs def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3): n_samples, n_features = X.shape coefs = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) # According to the lsqr documentation, alpha = damp^2. 
sqrt_alpha = np.sqrt(alpha) for i in range(y.shape[1]): y_column = y[:, i] info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i], atol=tol, btol=tol, iter_lim=max_iter) coefs[i] = info[0] n_iter[i] = info[2] return coefs, n_iter def _solve_cholesky(X, y, alpha): # w = inv(X^t X + alpha*Id) * X.T y n_samples, n_features = X.shape n_targets = y.shape[1] A = safe_sparse_dot(X.T, X, dense_output=True) Xy = safe_sparse_dot(X.T, y, dense_output=True) one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]]) if one_alpha: A.flat[::n_features + 1] += alpha[0] return linalg.solve(A, Xy, sym_pos=True, overwrite_a=True).T else: coefs = np.empty([n_targets, n_features]) for coef, target, current_alpha in zip(coefs, Xy.T, alpha): A.flat[::n_features + 1] += current_alpha coef[:] = linalg.solve(A, target, sym_pos=True, overwrite_a=False).ravel() A.flat[::n_features + 1] -= current_alpha return coefs def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False): # dual_coef = inv(X X^t + alpha*Id) y n_samples = K.shape[0] n_targets = y.shape[1] if copy: K = K.copy() alpha = np.atleast_1d(alpha) one_alpha = (alpha == alpha[0]).all() has_sw = isinstance(sample_weight, np.ndarray) \ or sample_weight not in [1.0, None] if has_sw: # Unlike other solvers, we need to support sample_weight directly # because K might be a pre-computed kernel. sw = np.sqrt(np.atleast_1d(sample_weight)) y = y * sw[:, np.newaxis] K *= np.outer(sw, sw) if one_alpha: # Only one penalty, we can solve multi-target problems in one time. K.flat[::n_samples + 1] += alpha[0] try: # Note: we must use overwrite_a=False in order to be able to # use the fall-back solution below in case a LinAlgError # is raised dual_coef = linalg.solve(K, y, sym_pos=True, overwrite_a=False) except np.linalg.LinAlgError: warnings.warn("Singular matrix in solving dual problem. Using " "least-squares solution instead.") dual_coef = linalg.lstsq(K, y)[0] # K is expensive to compute and store in memory so change it back in # case it was user-given. K.flat[::n_samples + 1] -= alpha[0] if has_sw: dual_coef *= sw[:, np.newaxis] return dual_coef else: # One penalty per target. We need to solve each target separately. dual_coefs = np.empty([n_targets, n_samples]) for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha): K.flat[::n_samples + 1] += current_alpha dual_coef[:] = linalg.solve(K, target, sym_pos=True, overwrite_a=False).ravel() K.flat[::n_samples + 1] -= current_alpha if has_sw: dual_coefs *= sw[np.newaxis, :] return dual_coefs.T def _solve_svd(X, y, alpha): U, s, Vt = linalg.svd(X, full_matrices=False) idx = s > 1e-15 # same default value as scipy.linalg.pinv s_nnz = s[idx][:, np.newaxis] UTy = np.dot(U.T, y) d = np.zeros((s.size, alpha.size)) d[idx] = s_nnz / (s_nnz ** 2 + alpha) d_UT_y = d * UTy return np.dot(Vt.T, d_UT_y).T def ridge_regression(X, y, alpha, sample_weight=None, solver='auto', max_iter=None, tol=1e-3, verbose=0, random_state=None, return_n_iter=False): """Solve the ridge equation by the method of normal equations. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- X : {array-like, sparse matrix, LinearOperator}, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values alpha : {float, array-like}, shape = [n_targets] if array-like The l_2 penalty to be used. If an array is passed, penalties are assumed to be specific to targets max_iter : int, optional Maximum number of iterations for conjugate gradient solver. 
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.

    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than 'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be
          available in old scipy versions. It also uses an iterative
          procedure.

        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' fast
          convergence is only guaranteed on features with approximately the
          same scale. You can preprocess the data with a scaler from
          sklearn.preprocessing.

        All last four solvers support both dense and sparse data.

    tol : float
        Precision of the solution.

    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.

    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.

    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    n_iter : int, optional
        The actual number of iterations performed by the solver.
        Only returned if `return_n_iter` is True.

    Notes
    -----
    This function won't compute the intercept.
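
    Examples
    --------
    A minimal illustration (a hedged addition, not from the original
    docstring).  With a single feature the closed form reduces to
    ``np.dot(X.T, y) / (np.dot(X.T, X) + alpha)``:

    >>> import numpy as np
    >>> ridge_regression(np.array([[0.], [1.], [2.]]),
    ...                  np.array([0., 1., 2.]), alpha=1.0)  # doctest: +SKIP
    array([ 0.833...])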
""" # SAG needs X and y columns to be C-contiguous and np.float64 if solver == 'sag': X = check_array(X, accept_sparse=['csr'], dtype=np.float64, order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='F') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64) y = check_array(y, dtype='numeric', ensure_2d=False) check_consistent_length(X, y) n_samples, n_features = X.shape if y.ndim > 2: raise ValueError("Target y has the wrong shape %s" % str(y.shape)) ravel = False if y.ndim == 1: y = y.reshape(-1, 1) ravel = True n_samples_, n_targets = y.shape if n_samples != n_samples_: raise ValueError("Number of samples in X and y does not correspond:" " %d != %d" % (n_samples, n_samples_)) has_sw = sample_weight is not None if solver == 'auto': # cholesky if it's a dense array and cg in any other case if not sparse.issparse(X) or has_sw: solver = 'cholesky' else: solver = 'sparse_cg' elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'): warnings.warn("""lsqr not available on this machine, falling back to sparse_cg.""") solver = 'sparse_cg' if has_sw: if np.atleast_1d(sample_weight).ndim > 1: raise ValueError("Sample weights must be 1D array or scalar") if solver != 'sag': # SAG supports sample_weight directly. For other solvers, # we implement sample_weight via a simple rescaling. X, y = _rescale_data(X, y, sample_weight) # There should be either 1 or n_targets penalties alpha = np.asarray(alpha).ravel() if alpha.size not in [1, n_targets]: raise ValueError("Number of targets and number of penalties " "do not correspond: %d != %d" % (alpha.size, n_targets)) if alpha.size == 1 and n_targets > 1: alpha = np.repeat(alpha, n_targets) if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'): raise ValueError('Solver %s not understood' % solver) n_iter = None if solver == 'sparse_cg': coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose) elif solver == 'lsqr': coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol) elif solver == 'cholesky': if n_features > n_samples: K = safe_sparse_dot(X, X.T, dense_output=True) try: dual_coef = _solve_cholesky_kernel(K, y, alpha) coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' else: try: coef = _solve_cholesky(X, y, alpha) except linalg.LinAlgError: # use SVD solver if matrix is singular solver = 'svd' elif solver == 'sag': # precompute max_squared_sum for all targets max_squared_sum = get_max_squared_sum(X) coef = np.empty((y.shape[1], n_features)) n_iter = np.empty(y.shape[1], dtype=np.int32) for i, (alpha_i, target) in enumerate(zip(alpha, y.T)): coef_, n_iter_, _ = sag_solver( X, target.ravel(), sample_weight, 'squared', alpha_i, max_iter, tol, verbose, random_state, False, max_squared_sum, dict()) coef[i] = coef_ n_iter[i] = n_iter_ coef = np.asarray(coef) if solver == 'svd': if sparse.issparse(X): raise TypeError('SVD solver does not support sparse' ' inputs currently') coef = _solve_svd(X, y, alpha) if ravel: # When y was passed as a 1d-array, we flatten the coefficients. 
        coef = coef.ravel()

    if return_n_iter:
        return coef, n_iter
    else:
        return coef


class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):

    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)

        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")

        X, y, X_mean, y_mean, X_std = self._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)

        self.coef_, self.n_iter_ = ridge_regression(
            X, y, alpha=self.alpha, sample_weight=sample_weight,
            max_iter=self.max_iter, tol=self.tol, solver=self.solver,
            random_state=self.random_state, return_n_iter=True)

        self._set_intercept(X_mean, y_mean, X_std)
        return self


class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.

    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.  Alpha corresponds
        to ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC. If an array is passed, penalties are assumed to be
        specific to the targets. Hence they must correspond in number.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than 'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        - 'sag' uses a Stochastic Average Gradient descent.
          It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that the fast
          convergence of 'sag' is only guaranteed on features with
          approximately the same scale. You can preprocess the data with a
          scaler from sklearn.preprocessing.

        The last four solvers support both dense and sparse data.

    tol : float
        Precision of the solution.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

    See also
    --------
    RidgeClassifier, RidgeCV, KernelRidge

    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
       normalize=False, random_state=None, solver='auto', tol=0.001)
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
                                    normalize=normalize, copy_X=copy_X,
                                    max_iter=max_iter, tol=tol, solver=solver,
                                    random_state=random_state)

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample

        Returns
        -------
        self : returns an instance of self.
        """
        return super(Ridge, self).fit(X, y, sample_weight=sample_weight)


class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alpha : float
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.  Alpha corresponds
        to ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).

    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:

        - 'auto' chooses the solver automatically based on the type of data.

        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than 'cholesky'.

        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.

        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).

        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.

        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large.

    tol : float
        Precision of the solution.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.

    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.

    See also
    --------
    Ridge, RidgeClassifierCV

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super(RidgeClassifier, self).__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples]
            Target values

        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

        Returns
        -------
        self : returns an instance of self.
        """
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))

        super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        return self._label_binarizer.classes_


class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation

    It allows efficient Leave-One-Out cross-validation.

    This class is not intended to be used directly. Use RidgeCV instead.

    Notes
    -----

    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.

    Let G = (K + alpha*Id)^-1.

    Dual solution: c = Gy
    Primal solution: w = X^T c

    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.

    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.

    loov = (KGY - diag(KG)Y) / diag(I-KG)

    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.

    looe = y - loov = c / diag(G)

    References
    ----------
    http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """

    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values

    def _pre_compute(self, X, y):
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y

    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)

    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B

    def _errors(self, alpha, y, v, Q, QT_y):
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return (c / G_diag) ** 2, c

    def _values(self, alpha, y, v, Q, QT_y):
        # don't construct matrix G, instead compute action on y & diagonal
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return y - (c / G_diag), c

    def _pre_compute_svd(self, X, y):
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y

    def _errors_svd(self, alpha, y, v, U, UT_y):
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return (c / G_diag) ** 2, c

    def _values_svd(self, alpha, y, v, U, UT_y):
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return y - (c / G_diag), c

    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values

        sample_weight : float or array-like of shape [n_samples]
            Sample weight

        Returns
        -------
        self : Returns self.
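        Notes
        -----
        When ``gcv_mode`` is None or 'auto', the eigendecomposition path is
        selected if X is sparse, if n_features > n_samples, or if sample
        weights are given; otherwise the SVD path is used. The former works
        on the n_samples x n_samples kernel matrix, the latter on X itself.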
""" X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float, multi_output=True, y_numeric=True) n_samples, n_features = X.shape X, y, X_mean, y_mean, X_std = LinearModel._center_data( X, y, self.fit_intercept, self.normalize, self.copy_X, sample_weight=sample_weight) gcv_mode = self.gcv_mode with_sw = len(np.shape(sample_weight)) if gcv_mode is None or gcv_mode == 'auto': if sparse.issparse(X) or n_features > n_samples or with_sw: gcv_mode = 'eigen' else: gcv_mode = 'svd' elif gcv_mode == "svd" and with_sw: # FIXME non-uniform sample weights not yet supported warnings.warn("non-uniform sample weights unsupported for svd, " "forcing usage of eigen") gcv_mode = 'eigen' if gcv_mode == 'eigen': _pre_compute = self._pre_compute _errors = self._errors _values = self._values elif gcv_mode == 'svd': # assert n_samples >= n_features _pre_compute = self._pre_compute_svd _errors = self._errors_svd _values = self._values_svd else: raise ValueError('bad gcv_mode "%s"' % gcv_mode) v, Q, QT_y = _pre_compute(X, y) n_y = 1 if len(y.shape) == 1 else y.shape[1] cv_values = np.zeros((n_samples * n_y, len(self.alphas))) C = [] scorer = check_scoring(self, scoring=self.scoring, allow_none=True) error = scorer is None for i, alpha in enumerate(self.alphas): weighted_alpha = (sample_weight * alpha if sample_weight is not None else alpha) if error: out, c = _errors(weighted_alpha, y, v, Q, QT_y) else: out, c = _values(weighted_alpha, y, v, Q, QT_y) cv_values[:, i] = out.ravel() C.append(c) if error: best = cv_values.mean(axis=0).argmin() else: # The scorer want an object that will make the predictions but # they are already computed efficiently by _RidgeGCV. This # identity_estimator will just return them def identity_estimator(): pass identity_estimator.decision_function = lambda y_predict: y_predict identity_estimator.predict = lambda y_predict: y_predict out = [scorer(identity_estimator, y.ravel(), cv_values[:, i]) for i in range(len(self.alphas))] best = np.argmax(out) self.alpha_ = self.alphas[best] self.dual_coef_ = C[best] self.coef_ = safe_sparse_dot(self.dual_coef_.T, X) self._set_intercept(X_mean, y_mean, X_std) if self.store_cv_values: if len(y.shape) == 1: cv_values_shape = n_samples, len(self.alphas) else: cv_values_shape = n_samples, n_y, len(self.alphas) self.cv_values_ = cv_values.reshape(cv_values_shape) return self class _BaseRidgeCV(LinearModel): def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, gcv_mode=None, store_cv_values=False): self.alphas = alphas self.fit_intercept = fit_intercept self.normalize = normalize self.scoring = scoring self.cv = cv self.gcv_mode = gcv_mode self.store_cv_values = store_cv_values def fit(self, X, y, sample_weight=None): """Fit Ridge regression model Parameters ---------- X : array-like, shape = [n_samples, n_features] Training data y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values sample_weight : float or array-like of shape [n_samples] Sample weight Returns ------- self : Returns self. 
""" if self.cv is None: estimator = _RidgeGCV(self.alphas, fit_intercept=self.fit_intercept, normalize=self.normalize, scoring=self.scoring, gcv_mode=self.gcv_mode, store_cv_values=self.store_cv_values) estimator.fit(X, y, sample_weight=sample_weight) self.alpha_ = estimator.alpha_ if self.store_cv_values: self.cv_values_ = estimator.cv_values_ else: if self.store_cv_values: raise ValueError("cv!=None and store_cv_values=True " " are incompatible") parameters = {'alpha': self.alphas} fit_params = {'sample_weight': sample_weight} gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept), parameters, fit_params=fit_params, cv=self.cv) gs.fit(X, y) estimator = gs.best_estimator_ self.alpha_ = gs.best_estimator_.alpha self.coef_ = estimator.coef_ self.intercept_ = estimator.intercept_ return self class RidgeCV(_BaseRidgeCV, RegressorMixin): """Ridge regression with built-in cross-validation. By default, it performs Generalized Cross-Validation, which is a form of efficient Leave-One-Out cross-validation. Read more in the :ref:`User Guide <ridge_regression>`. Parameters ---------- alphas : numpy array of shape [n_alphas] Array of alpha values to try. Small positive values of alpha improve the conditioning of the problem and reduce the variance of the estimates. Alpha corresponds to ``C^-1`` in other linear models such as LogisticRegression or LinearSVC. fit_intercept : boolean Whether to calculate the intercept for this model. If set to false, no intercept will be used in calculations (e.g. data is expected to be already centered). normalize : boolean, optional, default False If True, the regressors X will be normalized before regression. scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. cv : int, cross-validation generator or an iterable, optional Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 3-fold cross-validation, - integer, to specify the number of folds. - An object to be used as a cross-validation generator. - An iterable yielding train/test splits. For integer/None inputs, if ``y`` is binary or multiclass, :class:`StratifiedKFold` used, else, :class:`KFold` is used. Refer :ref:`User Guide <cross_validation>` for the various cross-validation strategies that can be used here. gcv_mode : {None, 'auto', 'svd', eigen'}, optional Flag indicating which strategy to use when performing Generalized Cross-Validation. Options are:: 'auto' : use svd if n_samples > n_features or when X is a sparse matrix, otherwise use eigen 'svd' : force computation via singular value decomposition of X (does not work for sparse matrices) 'eigen' : force computation via eigendecomposition of X^T X The 'auto' mode is the default and is intended to pick the cheaper option of the two depending upon the shape and format of the training data. store_cv_values : boolean, default=False Flag indicating if the cross-validation values corresponding to each alpha should be stored in the `cv_values_` attribute (see below). This flag is only compatible with `cv=None` (i.e. using Generalized Cross-Validation). Attributes ---------- cv_values_ : array, shape = [n_samples, n_alphas] or \ shape = [n_samples, n_targets, n_alphas], optional Cross-validation values for each alpha (if `store_cv_values=True` and \ `cv=None`). 
        After `fit()` has been called, this attribute will \
        contain the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter.

    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeClassifierCV: Ridge classifier with built-in cross validation
    """
    pass


class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.

    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation. Currently, only the
    n_features > n_samples case is handled efficiently.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.

    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_responses, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and
        `cv=None`). After `fit()` has been called, this attribute will \
        contain the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).

    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).

    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.

    alpha_ : float
        Estimated regularization parameter.

    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeCV: Ridge regression with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
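    Examples
    --------
    A minimal sketch; the tiny, linearly separable dataset below is
    illustrative only:

    >>> import numpy as np
    >>> from sklearn.linear_model import RidgeClassifierCV
    >>> X = np.array([[-1., -1.], [-2., -1.], [1., 1.], [2., 1.]])
    >>> y = np.array([0, 0, 1, 1])
    >>> clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0)).fit(X, y)
    >>> clf.classes_
    array([0, 1])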
""" def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True, normalize=False, scoring=None, cv=None, class_weight=None): super(RidgeClassifierCV, self).__init__( alphas=alphas, fit_intercept=fit_intercept, normalize=normalize, scoring=scoring, cv=cv) self.class_weight = class_weight def fit(self, X, y, sample_weight=None): """Fit the ridge classifier. Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples,) Target values. sample_weight : float or numpy array of shape (n_samples,) Sample weight. Returns ------- self : object Returns self. """ self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1) Y = self._label_binarizer.fit_transform(y) if not self._label_binarizer.y_type_.startswith('multilabel'): y = column_or_1d(y, warn=True) if self.class_weight: if sample_weight is None: sample_weight = 1. # modify the sample weights with the corresponding class weight sample_weight = (sample_weight * compute_sample_weight(self.class_weight, y)) _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight) return self @property def classes_(self): return self._label_binarizer.classes_
bsd-3-clause
dsquareindia/scikit-learn
sklearn/tests/test_cross_validation.py
79
47914
"""Test the cross_validation module""" from __future__ import division import warnings import numpy as np from scipy.sparse import coo_matrix from scipy.sparse import csr_matrix from scipy import stats from sklearn.exceptions import ConvergenceWarning from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_false from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame with warnings.catch_warnings(): warnings.simplefilter('ignore') from sklearn import cross_validation as cval from sklearn.datasets import make_regression from sklearn.datasets import load_boston from sklearn.datasets import load_digits from sklearn.datasets import load_iris from sklearn.datasets import make_multilabel_classification from sklearn.metrics import explained_variance_score from sklearn.metrics import make_scorer from sklearn.metrics import precision_score from sklearn.externals import six from sklearn.externals.six.moves import zip from sklearn.linear_model import Ridge from sklearn.multiclass import OneVsRestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.cluster import KMeans from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, a=0, allow_nd=False): self.a = a self.allow_nd = allow_nd def fit(self, X, Y=None, sample_weight=None, class_prior=None, sparse_sample_weight=None, sparse_param=None, dummy_int=None, dummy_str=None, dummy_obj=None, callback=None): """The dummy arguments are to test that this fit function can accept non-array arguments through cross-validation, such as: - int - str (this is actually array-like) - object - function """ self.dummy_int = dummy_int self.dummy_str = dummy_str self.dummy_obj = dummy_obj if callback is not None: callback(self) if self.allow_nd: X = X.reshape(len(X), -1) if X.ndim >= 3 and not self.allow_nd: raise ValueError('X cannot be d') if sample_weight is not None: assert_true(sample_weight.shape[0] == X.shape[0], 'MockClassifier extra fit_param sample_weight.shape[0]' ' is {0}, should be {1}'.format(sample_weight.shape[0], X.shape[0])) if class_prior is not None: assert_true(class_prior.shape[0] == len(np.unique(y)), 'MockClassifier extra fit_param class_prior.shape[0]' ' is {0}, should be {1}'.format(class_prior.shape[0], len(np.unique(y)))) if sparse_sample_weight is not None: fmt = ('MockClassifier extra fit_param sparse_sample_weight' '.shape[0] is {0}, should be {1}') assert_true(sparse_sample_weight.shape[0] == X.shape[0], fmt.format(sparse_sample_weight.shape[0], X.shape[0])) if sparse_param is not None: fmt = ('MockClassifier extra fit_param sparse_param.shape ' 'is ({0}, {1}), should be ({2}, {3})') assert_true(sparse_param.shape == P_sparse.shape, fmt.format(sparse_param.shape[0], sparse_param.shape[1], P_sparse.shape[0], 
                        P_sparse.shape[1]))
        return self

    def predict(self, T):
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]

    def score(self, X=None, Y=None):
        return 1. / (1 + np.abs(self.a))

    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}

X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]),
                       (np.array([1]), np.array([0]))), shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))

# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3

##############################################################################
# Tests

def check_valid_split(train, test, n_samples=None):
    # Use python sets to get more informative assertion failure messages
    train, test = set(train), set(test)

    # Train and test split should not overlap
    assert_equal(train.intersection(test), set())

    if n_samples is not None:
        # Check that the union of train and test splits covers all the
        # indices
        assert_equal(train.union(test), set(range(n_samples)))

def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)

    collected_test_samples = set()
    iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        iterations += 1
        collected_test_samples.update(test)

    # Check that the accumulated test samples cover the whole dataset
    assert_equal(iterations, expected_n_iter)
    if n_samples is not None:
        assert_equal(collected_test_samples, set(range(n_samples)))

def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)

    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 3]

    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)

    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))

    # Check that errors are raised if all n_labels for individual
    # classes are less than n_folds.
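    # Unlike the warning case above, the least populated class here has only
    # a single member, for which StratifiedKFold raises instead of warning.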
y = [3, 3, -1, -1, 2] assert_raises(ValueError, cval.StratifiedKFold, y, 3) # Error when number of folds is <= 1 assert_raises(ValueError, cval.KFold, 2, 0) assert_raises(ValueError, cval.KFold, 2, 1) error_string = ("k-fold cross validation requires at least one" " train / test split") assert_raise_message(ValueError, error_string, cval.StratifiedKFold, y, 0) assert_raise_message(ValueError, error_string, cval.StratifiedKFold, y, 1) # When n is not integer: assert_raises(ValueError, cval.KFold, 2.5, 2) # When n_folds is not integer: assert_raises(ValueError, cval.KFold, 5, 1.5) assert_raises(ValueError, cval.StratifiedKFold, y, 1.5) def test_kfold_indices(): # Check all indices are returned in the test folds kf = cval.KFold(300, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=300) # Check all indices are returned in the test folds even when equal-sized # folds are not possible kf = cval.KFold(17, 3) check_cv_coverage(kf, expected_n_iter=3, n_samples=17) def test_kfold_no_shuffle(): # Manually check that KFold preserves the data ordering on toy datasets splits = iter(cval.KFold(4, 2)) train, test = next(splits) assert_array_equal(test, [0, 1]) assert_array_equal(train, [2, 3]) train, test = next(splits) assert_array_equal(test, [2, 3]) assert_array_equal(train, [0, 1]) splits = iter(cval.KFold(5, 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 2]) assert_array_equal(train, [3, 4]) train, test = next(splits) assert_array_equal(test, [3, 4]) assert_array_equal(train, [0, 1, 2]) def test_stratified_kfold_no_shuffle(): # Manually check that StratifiedKFold preserves the data ordering as much # as possible on toy datasets in order to avoid hiding sample dependencies # when possible splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 2]) assert_array_equal(train, [1, 3]) train, test = next(splits) assert_array_equal(test, [1, 3]) assert_array_equal(train, [0, 2]) splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2)) train, test = next(splits) assert_array_equal(test, [0, 1, 3, 4]) assert_array_equal(train, [2, 5, 6]) train, test = next(splits) assert_array_equal(test, [2, 5, 6]) assert_array_equal(train, [0, 1, 3, 4]) def test_stratified_kfold_ratios(): # Check that stratified kfold preserves label ratios in individual splits # Repeat with shuffling turned off and on n_samples = 1000 labels = np.array([4] * int(0.10 * n_samples) + [0] * int(0.89 * n_samples) + [1] * int(0.01 * n_samples)) for shuffle in [False, True]: for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle): assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10, 2) assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89, 2) assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01, 2) assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2) assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2) assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2) def test_kfold_balance(): # Check that KFold returns folds with balanced sizes for kf in [cval.KFold(i, 5) for i in range(11, 17)]: sizes = [] for _, test in kf: sizes.append(len(test)) assert_true((np.max(sizes) - np.min(sizes)) <= 1) assert_equal(np.sum(sizes), kf.n) def test_stratifiedkfold_balance(): # Check that KFold returns folds with balanced sizes (only when # stratification is possible) # Repeat with shuffling turned off and on labels = [0] * 3 + [1] * 14 for shuffle in [False, True]: for skf in 
                    [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
                     for i in range(11, 17)]:
            sizes = []
            for _, test in skf:
                sizes.append(len(test))

            assert_true((np.max(sizes) - np.min(sizes)) <= 1)
            assert_equal(np.sum(sizes), skf.n)

def test_shuffle_kfold():
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)

    all_folds = None
    for train, test in kf:
        assert_true(np.any(np.arange(100) != ind[test]))
        assert_true(np.any(np.arange(100, 200) != ind[test]))
        assert_true(np.any(np.arange(200, 300) != ind[test]))

        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))

    all_folds.sort()
    assert_array_equal(all_folds, ind)

def test_shuffle_stratifiedkfold():
    # Check that shuffling is happening when requested, and for proper
    # sample coverage
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    for (_, test0), (_, test1) in zip(kf0, kf1):
        assert_true(set(test0) != set(test1))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)

def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    # The digits samples are dependent: they are apparently grouped by
    # authors although we don't have any information on the groups segment
    # locations for this data. We can highlight this fact by computing
    # k-fold cross-validation with and without shuffling: we observe that
    # the shuffling case wrongly makes the IID assumption and is therefore
    # too optimistic: it estimates a much higher accuracy (around 0.96) than
    # the non-shuffling variant (around 0.86).

    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)

    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)

    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regard to the writing style of the
    # authors by yielding a seriously overestimated score:

    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)

    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either.
As the digits dataset is approximately balanced # the estimated mean score is close to the score measured with # non-shuffled KFold cv = cval.StratifiedKFold(y, 5) mean_score = cval.cross_val_score(model, X, y, cv=cv).mean() assert_greater(0.88, mean_score) assert_greater(mean_score, 0.85) def test_label_kfold(): rng = np.random.RandomState(0) # Parameters of the test n_labels = 15 n_samples = 1000 n_folds = 5 # Construct the test data tolerance = 0.05 * n_samples # 5 percent error allowed labels = rng.randint(0, n_labels, n_samples) folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split labels = np.asarray(labels, dtype=object) for train, test in cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Construct the test data labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean', 'Francis', 'Robert', 'Michel', 'Rachel', 'Lois', 'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean', 'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix', 'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky', 'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis', 'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia'] labels = np.asarray(labels, dtype=object) n_labels = len(np.unique(labels)) n_samples = len(labels) n_folds = 5 tolerance = 0.05 * n_samples # 5 percent error allowed folds = cval.LabelKFold(labels, n_folds=n_folds).idxs ideal_n_labels_per_fold = n_samples // n_folds # Check that folds have approximately the same size assert_equal(len(folds), len(labels)) for i in np.unique(folds): assert_greater_equal(tolerance, abs(sum(folds == i) - ideal_n_labels_per_fold)) # Check that each label appears only in 1 fold for label in np.unique(labels): assert_equal(len(np.unique(folds[labels == label])), 1) # Check that no label is on both sides of the split for train, test in cval.LabelKFold(labels, n_folds=n_folds): assert_equal(len(np.intersect1d(labels[train], labels[test])), 0) # Should fail if there are more folds than labels labels = np.array([1, 1, 1, 2, 2]) assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3) def test_shuffle_split(): ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0) ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0) ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0) for typ in six.integer_types: ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0) for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4): assert_array_equal(t1[0], t2[0]) assert_array_equal(t2[0], t3[0]) assert_array_equal(t3[0], t4[0]) assert_array_equal(t1[1], t2[1]) assert_array_equal(t2[1], t3[1]) assert_array_equal(t3[1], t4[1]) def test_stratified_shuffle_split_init(): y = np.asarray([0, 1, 1, 1, 2, 2, 2]) # Check that error is raised if there is a class with only one sample assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2) # Check that error is raised if the test set size is smaller than n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2) # Check that error is raised if the train set size is smaller than # n_classes assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 
                  3, 3, 2)

    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there are not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)

    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)

def test_stratified_shuffle_split_iter():
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2] * 2),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]

    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        test_size = np.ceil(0.33 * len(y))
        train_size = len(y) - test_size
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Check that folds keep class proportions
            p_train = (np.bincount(np.unique(y[train],
                                   return_inverse=True)[1]) /
                       float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test],
                                  return_inverse=True)[1]) /
                      float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(len(train) + len(test), y.size)
            assert_equal(len(train), train_size)
            assert_equal(len(test), test_size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test),
                               [])

def test_stratified_shuffle_split_even():
    # Test the StratifiedShuffleSplit, indices are drawn with an
    # equal chance
    n_folds = 5
    n_iter = 1000

    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")

    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)

        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)

        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)

        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)

        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples

        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)

def test_stratified_shuffle_split_overlap_train_test_bug():
    # See https://github.com/scikit-learn/scikit-learn/issues/6121 for
    # the original bug report
    labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
    splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
                                         test_size=0.5, random_state=0)
    train, test = next(iter(splits))

    assert_array_equal(np.intersect1d(train, test), [])

def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
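    # PredefinedSplit takes one test-fold index per sample; the -1 fill
    # below is just an initial placeholder before each KFold test index
    # is recorded.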
folds = -1 * np.ones(10) kf_train = [] kf_test = [] for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)): kf_train.append(train_ind) kf_test.append(test_ind) folds[test_ind] = i ps_train = [] ps_test = [] ps = cval.PredefinedSplit(folds) for train_ind, test_ind in ps: ps_train.append(train_ind) ps_test.append(test_ind) assert_array_equal(ps_train, kf_train) assert_array_equal(ps_test, kf_test) def test_label_shuffle_split(): ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]), np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]), np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]), np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]), ] for y in ys: n_iter = 6 test_size = 1. / 3 slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size, random_state=0) # Make sure the repr works repr(slo) # Test that the length is correct assert_equal(len(slo), n_iter) y_unique = np.unique(y) for train, test in slo: # First test: no train label is in the test set and vice versa y_train_unique = np.unique(y[train]) y_test_unique = np.unique(y[test]) assert_false(np.any(np.in1d(y[train], y_test_unique))) assert_false(np.any(np.in1d(y[test], y_train_unique))) # Second test: train and test add up to all the data assert_equal(y[train].size + y[test].size, y.size) # Third test: train and test are disjoint assert_array_equal(np.intersect1d(train, test), []) # Fourth test: # unique train and test labels are correct, # +- 1 for rounding error assert_true(abs(len(y_test_unique) - round(test_size * len(y_unique))) <= 1) assert_true(abs(len(y_train_unique) - round((1.0 - test_size) * len(y_unique))) <= 1) def test_leave_label_out_changing_labels(): # Check that LeaveOneLabelOut and LeavePLabelOut work normally if # the labels variable is changed before calling __iter__ labels = np.array([0, 1, 2, 1, 1, 2, 0, 0]) labels_changing = np.array(labels, copy=True) lolo = cval.LeaveOneLabelOut(labels) lolo_changing = cval.LeaveOneLabelOut(labels_changing) lplo = cval.LeavePLabelOut(labels, p=2) lplo_changing = cval.LeavePLabelOut(labels_changing, p=2) labels_changing[:] = 0 for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]: for (train, test), (train_chan, test_chan) in zip(llo, llo_changing): assert_array_equal(train, train_chan) assert_array_equal(test, test_chan) def test_cross_val_score(): clf = MockClassifier() for a in range(-10, 10): clf.a = a # Smoke test scores = cval.cross_val_score(clf, X, y) assert_array_equal(scores, clf.score(X, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) scores = cval.cross_val_score(clf, X_sparse, y) assert_array_equal(scores, clf.score(X_sparse, y)) # test with multioutput y scores = cval.cross_val_score(clf, X_sparse, X) assert_array_equal(scores, clf.score(X_sparse, X)) # test with X and y as list list_check = lambda x: isinstance(x, list) clf = CheckingClassifier(check_X=list_check) scores = cval.cross_val_score(clf, X.tolist(), y.tolist()) clf = CheckingClassifier(check_y=list_check) scores = cval.cross_val_score(clf, X, y.tolist()) assert_raises(ValueError, cval.cross_val_score, clf, X, y, scoring="sklearn") # test with 3d X and X_3d = X[:, :, np.newaxis] clf = MockClassifier(allow_nd=True) scores = cval.cross_val_score(clf, X_3d, y) clf = MockClassifier(allow_nd=False) assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y) def test_cross_val_score_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, 
MockDataFrame)] try: from pandas import Series, DataFrame types.append((Series, DataFrame)) except ImportError: pass for TargetType, InputFeatureType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) cval.cross_val_score(clf, X_df, y_ser) def test_cross_val_score_mask(): # test that cross_val_score works with boolean masks svm = SVC(kernel="linear") iris = load_iris() X, y = iris.data, iris.target cv_indices = cval.KFold(len(y), 5) scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices) cv_indices = cval.KFold(len(y), 5) cv_masks = [] for train, test in cv_indices: mask_train = np.zeros(len(y), dtype=np.bool) mask_test = np.zeros(len(y), dtype=np.bool) mask_train[train] = 1 mask_test[test] = 1 cv_masks.append((train, test)) scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks) assert_array_equal(scores_indices, scores_masks) def test_cross_val_score_precomputed(): # test for svm with precomputed kernel svm = SVC(kernel="precomputed") iris = load_iris() X, y = iris.data, iris.target linear_kernel = np.dot(X, X.T) score_precomputed = cval.cross_val_score(svm, linear_kernel, y) svm = SVC(kernel="linear") score_linear = cval.cross_val_score(svm, X, y) assert_array_equal(score_precomputed, score_linear) # Error raised for non-square X svm = SVC(kernel="precomputed") assert_raises(ValueError, cval.cross_val_score, svm, X, y) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cval.cross_val_score, svm, linear_kernel.tolist(), y) def test_cross_val_score_fit_params(): clf = MockClassifier() n_samples = X.shape[0] n_classes = len(np.unique(y)) DUMMY_INT = 42 DUMMY_STR = '42' DUMMY_OBJ = object() def assert_fit_params(clf): # Function to test that the values are passed correctly to the # classifier arguments for non-array type assert_equal(clf.dummy_int, DUMMY_INT) assert_equal(clf.dummy_str, DUMMY_STR) assert_equal(clf.dummy_obj, DUMMY_OBJ) fit_params = {'sample_weight': np.ones(n_samples), 'class_prior': np.ones(n_classes) / n_classes, 'sparse_sample_weight': W_sparse, 'sparse_param': P_sparse, 'dummy_int': DUMMY_INT, 'dummy_str': DUMMY_STR, 'dummy_obj': DUMMY_OBJ, 'callback': assert_fit_params} cval.cross_val_score(clf, X, y, fit_params=fit_params) def test_cross_val_score_score_func(): clf = MockClassifier() _score_func_args = [] def score_func(y_test, y_predict): _score_func_args.append((y_test, y_predict)) return 1.0 with warnings.catch_warnings(record=True): scoring = make_scorer(score_func) score = cval.cross_val_score(clf, X, y, scoring=scoring) assert_array_equal(score, [1.0, 1.0, 1.0]) assert len(_score_func_args) == 3 def test_cross_val_score_errors(): class BrokenEstimator: pass assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X) def test_train_test_split_errors(): assert_raises(ValueError, cval.train_test_split) assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1) assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6, train_size=0.6) assert_raises(ValueError, cval.train_test_split, range(3), test_size=np.float32(0.6), train_size=np.float32(0.6)) assert_raises(ValueError, cval.train_test_split, range(3), test_size="wrong_type") assert_raises(ValueError, cval.train_test_split, range(3), test_size=2, train_size=4) assert_raises(TypeError, cval.train_test_split, 
range(3), some_argument=1.1) assert_raises(ValueError, cval.train_test_split, range(3), range(42)) def test_train_test_split(): X = np.arange(100).reshape((10, 10)) X_s = coo_matrix(X) y = np.arange(10) # simple test split = cval.train_test_split(X, y, test_size=None, train_size=.5) X_train, X_test, y_train, y_test = split assert_equal(len(y_test), len(y_train)) # test correspondence of X and y assert_array_equal(X_train[:, 0], y_train * 10) assert_array_equal(X_test[:, 0], y_test * 10) # conversion of lists to arrays (deprecated?) with warnings.catch_warnings(record=True): split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_array_equal(X_train, X_s_train.toarray()) assert_array_equal(X_test, X_s_test.toarray()) # don't convert lists to anything else by default split = cval.train_test_split(X, X_s, y.tolist()) X_train, X_test, X_s_train, X_s_test, y_train, y_test = split assert_true(isinstance(y_train, list)) assert_true(isinstance(y_test, list)) # allow nd-arrays X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) split = cval.train_test_split(X_4d, y_3d) assert_equal(split[0].shape, (7, 5, 3, 2)) assert_equal(split[1].shape, (3, 5, 3, 2)) assert_equal(split[2].shape, (7, 7, 11)) assert_equal(split[3].shape, (3, 7, 11)) # test stratification option y = np.array([1, 1, 1, 1, 2, 2, 2, 2]) for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75], [2, 4, 2, 4, 6]): train, test = cval.train_test_split(y, test_size=test_size, stratify=y, random_state=0) assert_equal(len(test), exp_test_size) assert_equal(len(test) + len(train), len(y)) # check the 1:1 ratio of ones and twos in the data is preserved assert_equal(np.sum(train == 1), np.sum(train == 2)) def train_test_split_pandas(): # check cross_val_score doesn't destroy pandas dataframe types = [MockDataFrame] try: from pandas import DataFrame types.append(DataFrame) except ImportError: pass for InputFeatureType in types: # X dataframe X_df = InputFeatureType(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, InputFeatureType)) assert_true(isinstance(X_test, InputFeatureType)) def train_test_split_mock_pandas(): # X mock dataframe X_df = MockDataFrame(X) X_train, X_test = cval.train_test_split(X_df) assert_true(isinstance(X_train, MockDataFrame)) assert_true(isinstance(X_test, MockDataFrame)) def test_cross_val_score_with_score_func_classification(): iris = load_iris() clf = SVC(kernel='linear') # Default score (should be the accuracy score) scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5) assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2) # Correct classification score (aka. zero / one score) - should be the # same as the default estimator score zo_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="accuracy", cv=5) assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2) # F1 score (class are balanced so f1_score should be equal to zero/one # score f1_scores = cval.cross_val_score(clf, iris.data, iris.target, scoring="f1_weighted", cv=5) assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2) def test_cross_val_score_with_score_func_regression(): X, y = make_regression(n_samples=30, n_features=20, n_informative=5, random_state=0) reg = Ridge() # Default score of the Ridge regression estimator scores = cval.cross_val_score(reg, X, y, cv=5) assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # R2 score (aka. 
determination coefficient) - should be the # same as the default estimator score r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5) assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) # Mean squared error; this is a loss function, so "scores" are negative neg_mse_scores = cval.cross_val_score(reg, X, y, cv=5, scoring="neg_mean_squared_error") expected_neg_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99]) assert_array_almost_equal(neg_mse_scores, expected_neg_mse, 2) # Explained variance scoring = make_scorer(explained_variance_score) ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring) assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2) def test_permutation_score(): iris = load_iris() X = iris.data X_sparse = coo_matrix(X) y = iris.target svm = SVC(kernel='linear') cv = cval.StratifiedKFold(y, 2) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_greater(score, 0.9) assert_almost_equal(pvalue, 0.0, 1) score_label, _, pvalue_label = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # check that we obtain the same results with a sparse representation svm_sparse = SVC(kernel='linear') cv_sparse = cval.StratifiedKFold(y, 2) score_label, _, pvalue_label = cval.permutation_test_score( svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse, scoring="accuracy", labels=np.ones(y.size), random_state=0) assert_true(score_label == score) assert_true(pvalue_label == pvalue) # test with custom scoring object def custom_score(y_true, y_pred): return (((y_true == y_pred).sum() - (y_true != y_pred).sum()) / y_true.shape[0]) scorer = make_scorer(custom_score) score, _, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0) assert_almost_equal(score, .93, 2) assert_almost_equal(pvalue, 0.01, 3) # set random y y = np.mod(np.arange(len(y)), 3) score, scores, pvalue = cval.permutation_test_score( svm, X, y, n_permutations=30, cv=cv, scoring="accuracy") assert_less(score, 0.5) assert_greater(pvalue, 0.2) def test_cross_val_generator_with_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) # explicitly passing indices value is deprecated loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ps = cval.PredefinedSplit([1, 1, 2, 2]) ss = cval.ShuffleSplit(2) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] y[train], y[test] @ignore_warnings def test_cross_val_generator_with_default_indices(): X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]]) y = np.array([1, 1, 2, 2]) labels = np.array([1, 2, 3, 4]) loo = cval.LeaveOneOut(4) lpo = cval.LeavePOut(4, 2) kf = cval.KFold(4, 2) skf = cval.StratifiedKFold(y, 2) lolo = cval.LeaveOneLabelOut(labels) lopo = cval.LeavePLabelOut(labels, 2) ss = cval.ShuffleSplit(2) ps = cval.PredefinedSplit([1, 1, 2, 2]) for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]: for train, test in cv: assert_not_equal(np.asarray(train).dtype.kind, 'b') assert_not_equal(np.asarray(train).dtype.kind, 'b') X[train], X[test] 
            y[train], y[test]


def test_shufflesplit_errors():
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8,
                  train_size=3)
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)


def test_shufflesplit_reproducible():
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    ss = cval.ShuffleSplit(10, random_state=21)
    assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))


def test_safe_split_with_precomputed_kernel():
    clf = SVC()
    clfp = SVC(kernel="precomputed")

    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)

    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]

    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))

    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))


def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)


def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    cval.train_test_split(X, y, test_size=0.2, random_state=42)


def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)


def test_check_cv_return_types():
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))

    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))

    X = np.ones((5, 2))
    y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
    cv = cval.check_cv(3, X, y_multilabel, classifier=True)
    assert_true(isinstance(cv, cval.KFold))

    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))


def test_cross_val_score_multilabel():
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])


def test_cross_val_predict():
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))

    est = Ridge()

    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])

    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)

    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))

    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))

    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    assert_array_almost_equal(len(preds), len(y))

    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))

    def bad_cv():
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())


def test_cross_val_predict_input_types():
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))

    # test with multioutput y
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))

    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))

    # test with multioutput y
    with ignore_warnings(category=ConvergenceWarning):
        predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))

    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())

    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())

    # test with 3d X
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))


def test_cross_val_predict_pandas():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)


def test_sparse_fit_params():
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))


def test_check_is_partition():
    p = np.arange(100)
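    # np.arange(100) is a permutation of 0..99 and hence a valid partition of
    # the index set; dropping or repeating any index must break that property.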
    assert_true(cval._check_is_partition(p, 100))
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))


def test_cross_val_predict_sparse_prediction():
    # check that cross_val_predict gives same result for sparse and dense
    # input
    X, y = make_multilabel_classification(n_classes=2, n_labels=1,
                                          allow_unlabeled=False,
                                          return_indicator=True,
                                          random_state=1)
    X_sparse = csr_matrix(X)
    y_sparse = csr_matrix(y)
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    preds = cval.cross_val_predict(classif, X, y, cv=10)
    preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
    preds_sparse = preds_sparse.toarray()
    assert_array_almost_equal(preds_sparse, preds)
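
# Editor's sketch (not part of the upstream test suite): typical use of the
# helpers exercised above, assuming the module-level `cval` alias for
# sklearn.cross_validation:
#
#   X_tr, X_te, y_tr, y_te = cval.train_test_split(
#       X, y, test_size=0.25, stratify=y, random_state=0)
#   scores = cval.cross_val_score(Ridge(), X_tr, y_tr, cv=5)
#   preds = cval.cross_val_predict(Ridge(), X_tr, y_tr, cv=5)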
bsd-3-clause
tiagofrepereira2012/tensorflow
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
52
69800
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import json
import tempfile

import numpy as np

from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util


def _assert_metrics_in_range(keys, metrics):
  epsilon = 0.00001  # Added for floating point edge cases.
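  # Each named metric (accuracy, AUC, ...) is a fraction, so it must lie in
  # [0, 1]; the interval is widened by epsilon to tolerate rounding.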
  for key in keys:
    estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
                                         metrics)


class _CheckCallsHead(head_lib.Head):
  """Head that checks whether head_ops is called."""

  def __init__(self):
    self._head_ops_called_times = 0

  @property
  def logits_dimension(self):
    return 1

  def create_model_fn_ops(
      self, mode, features, labels=None, train_op_fn=None, logits=None,
      logits_input=None, scope=None):
    """See `_Head`."""
    self._head_ops_called_times += 1
    loss = losses.mean_squared_error(labels, logits)
    return model_fn.ModelFnOps(
        mode, predictions={'loss': loss}, loss=loss,
        train_op=train_op_fn(loss), eval_metric_ops={'loss': loss})

  @property
  def head_ops_called_times(self):
    return self._head_ops_called_times


class _StepCounterHook(session_run_hook.SessionRunHook):
  """Counts the number of training steps."""

  def __init__(self):
    self._steps = 0

  def after_run(self, run_context, run_values):
    del run_context, run_values
    self._steps += 1

  @property
  def steps(self):
    return self._steps


class EmbeddingMultiplierTest(test.TestCase):
  """dnn_model_fn tests."""

  def testRaisesNonEmbeddingColumn(self):
    one_hot_language = feature_column.one_hot_column(
        feature_column.sparse_column_with_hash_bucket('language', 10))

    params = {
        'dnn_feature_columns': [one_hot_language],
        'head': head_lib.multi_class_head(2),
        'dnn_hidden_units': [1],
        # Set lr mult to 0. to keep embeddings constant.
        'embedding_lr_multipliers': {
            one_hot_language: 0.0
        },
        'dnn_optimizer': 'Adagrad',
    }
    features = {
        'language':
            sparse_tensor.SparseTensor(
                values=['en', 'fr', 'zh'],
                indices=[[0, 0], [1, 0], [2, 0]],
                dense_shape=[3, 1]),
    }
    labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
    with self.assertRaisesRegexp(ValueError,
                                 'can only be defined for embedding columns'):
      dnn_linear_combined._dnn_linear_combined_model_fn(
          features, labels, model_fn.ModeKeys.TRAIN, params)

  def testMultipliesGradient(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    embedding_wire = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('wire', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))

    params = {
        'dnn_feature_columns': [embedding_language, embedding_wire],
        'head': head_lib.multi_class_head(2),
        'dnn_hidden_units': [1],
        # Set lr mult to 0. to keep language embeddings constant, whereas wire
        # embeddings will be trained.
        'embedding_lr_multipliers': {
            embedding_language: 0.0
        },
        'dnn_optimizer': 'Adagrad',
    }
    with ops.Graph().as_default():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1]),
          'wire':
              sparse_tensor.SparseTensor(
                  values=['omar', 'stringer', 'marlo'],
                  indices=[[0, 0], [1, 0], [2, 0]],
                  dense_shape=[3, 1]),
      }
      labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
      training_util.create_global_step()
      model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
          features, labels, model_fn.ModeKeys.TRAIN, params)
      with monitored_session.MonitoredSession() as sess:
        language_var = dnn_linear_combined._get_embedding_variable(
            embedding_language, 'dnn', 'dnn/input_from_feature_columns')
        language_initial_value = sess.run(language_var)
        for _ in range(2):
          _, language_value = sess.run([model_ops.train_op, language_var])

    self.assertAllClose(language_value, language_initial_value)
    # We could also test that wire_value changed, but that test would be
    # flaky.
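
# Editor's note: `embedding_lr_multipliers` scales the learning rate applied
# to a specific embedding variable, so a multiplier of 0.0 freezes that
# embedding. testMultipliesGradient above relies on this to keep the language
# embedding at its initial value while the wire embedding keeps training.
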
class DNNLinearCombinedEstimatorTest(test.TestCase):

  def testEstimatorContract(self):
    estimator_test_utils.assert_estimator_contract(
        self, dnn_linear_combined.DNNLinearCombinedEstimator)

  def testNoFeatureColumns(self):
    with self.assertRaisesRegexp(
        ValueError,
        'Either linear_feature_columns or dnn_feature_columns must be defined'):
      dnn_linear_combined.DNNLinearCombinedEstimator(
          head=_CheckCallsHead(),
          linear_feature_columns=None,
          dnn_feature_columns=None,
          dnn_hidden_units=[3, 3])

  def testCheckCallsHead(self):
    """Tests binary classification using matrix data as input."""
    head = _CheckCallsHead()
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [
        feature_column.real_valued_column('feature', dimension=4)]
    bucketized_feature = [feature_column.bucketized_column(
        cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]

    estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
        head,
        linear_feature_columns=bucketized_feature,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
    self.assertEqual(1, head.head_ops_called_times)

    estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
    self.assertEqual(2, head.head_ops_called_times)

    estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
    self.assertEqual(3, head.head_ops_called_times)


class DNNLinearCombinedClassifierTest(test.TestCase):

  def testEstimatorContract(self):
    estimator_test_utils.assert_estimator_contract(
        self, dnn_linear_combined.DNNLinearCombinedClassifier)

  def testExperimentIntegration(self):
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    exp = experiment.Experiment(
        estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
            linear_feature_columns=cont_features,
            dnn_feature_columns=cont_features,
            dnn_hidden_units=[3, 3]),
        train_input_fn=test_data.iris_input_logistic_fn,
        eval_input_fn=test_data.iris_input_logistic_fn)
    exp.test()

  def testNoFeatureColumns(self):
    with self.assertRaisesRegexp(
        ValueError,
        'Either linear_feature_columns or dnn_feature_columns must be defined'):
      dnn_linear_combined.DNNLinearCombinedClassifier(
          linear_feature_columns=None,
          dnn_feature_columns=None,
          dnn_hidden_units=[3, 3])

  def testNoDnnHiddenUnits(self):
    def _input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.real_valued_column('age')

    with self.assertRaisesRegexp(
        ValueError,
        'dnn_hidden_units must be defined when dnn_feature_columns is '
        'specified'):
      classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
          dnn_feature_columns=[age, language])
      classifier.fit(input_fn=_input_fn, steps=2)

  def testSyncReplicasOptimizerUnsupported(self):
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
        opt=adagrad.AdagradOptimizer(learning_rate=0.1),
        replicas_to_aggregate=1,
        total_num_replicas=1)
    sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=sync_optimizer)

    with self.assertRaisesRegexp(
        ValueError,
        'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
      classifier.fit(
          input_fn=test_data.iris_input_multiclass_fn,
          steps=100,
          monitors=[sync_hook])

  def testEmbeddingMultiplier(self):
    embedding_language = feature_column.embedding_column(
        feature_column.sparse_column_with_hash_bucket('language', 10),
        dimension=1,
        initializer=init_ops.constant_initializer(0.1))
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        dnn_feature_columns=[embedding_language],
        dnn_hidden_units=[3, 3],
        embedding_lr_multipliers={embedding_language: 0.8})
    self.assertEqual({
        embedding_language: 0.8
    }, classifier.params['embedding_lr_multipliers'])

  def testInputPartitionSize(self):
    def _input_fn_float_label(num_epochs=None):
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
      return features, labels

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)
    feature_columns = [
        feature_column.embedding_column(language_column, dimension=1),
    ]

    # Set num_ps_replica to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
    config = run_config.RunConfig(tf_random_seed=1)
    config._num_ps_replicas = 10
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=2,
        dnn_feature_columns=feature_columns,
        dnn_hidden_units=[3, 3],
        dnn_optimizer='Adagrad',
        config=config,
        input_layer_min_slice_size=1)

    # Ensure the param is passed in.
    self.assertTrue(callable(classifier.params['input_layer_partitioner']))

    # Ensure the partition count is 10.
    classifier.fit(input_fn=_input_fn_float_label, steps=50)
    partition_count = 0
    for name in classifier.get_variable_names():
      if 'language_embedding' in name and 'Adagrad' in name:
        partition_count += 1
    self.assertEqual(10, partition_count)

  def testLogisticRegression_MatrixData(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_feature = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_feature,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testLogisticRegression_TensorData(self):
    """Tests binary classification using Tensor data as input."""

    def _input_fn():
      iris = test_data.prepare_iris_data_for_logistic_regression()
      features = {}
      for i in range(4):
        # The following shows how to provide the Tensor data for
        # RealValuedColumns.
        features.update({
            str(i):
                array_ops.reshape(
                    constant_op.constant(
                        iris.data[:, i], dtype=dtypes.float32), [-1, 1])
        })
      # The following shows how to provide the SparseTensor data for
      # a SparseColumn.
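      # A SparseTensor is specified by (values, indices, dense_shape): each
      # [row, position] pair in `indices` gives the location of the matching
      # entry of `values` within a dense batch of shape `dense_shape`.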
      features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
          values=['en', 'fr', 'zh'],
          indices=[[0, 0], [0, 1], [60, 0]],
          dense_shape=[len(iris.target), 2])
      labels = array_ops.reshape(
          constant_op.constant(
              iris.target, dtype=dtypes.int32), [-1, 1])
      return features, labels

    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [
        feature_column.real_valued_column(str(i)) for i in range(4)
    ]
    linear_features = [
        feature_column.bucketized_column(cont_features[i],
                                         test_data.get_quantile_based_buckets(
                                             iris.data[:, i], 10))
        for i in range(4)
    ]
    linear_features.append(
        feature_column.sparse_column_with_hash_bucket(
            'dummy_sparse_column', hash_bucket_size=100))

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=linear_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=100)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testEstimatorWithCoreFeatureColumns(self):
    """Tests binary classification using Tensor data as input."""

    def _input_fn():
      iris = test_data.prepare_iris_data_for_logistic_regression()
      features = {}
      for i in range(4):
        # The following shows how to provide the Tensor data for
        # RealValuedColumns.
        features.update({
            str(i):
                array_ops.reshape(
                    constant_op.constant(
                        iris.data[:, i], dtype=dtypes.float32), [-1, 1])
        })
      # The following shows how to provide the SparseTensor data for
      # a SparseColumn.
      features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
          values=['en', 'fr', 'zh'],
          indices=[[0, 0], [0, 1], [60, 0]],
          dense_shape=[len(iris.target), 2])
      labels = array_ops.reshape(
          constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
      return features, labels

    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
    linear_features = [
        fc_core.bucketized_column(
            cont_features[i],
            sorted(set(test_data.get_quantile_based_buckets(
                iris.data[:, i], 10)))) for i in range(4)
    ]
    linear_features.append(
        fc_core.categorical_column_with_hash_bucket(
            'dummy_sparse_column', hash_bucket_size=100))

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=linear_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=100)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn():
      features = {
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant([[1], [0], [0]])
      return features, labels

    sparse_features = [
        # The given hash_bucket_size results in variables larger than the
        # default min_slice_size attribute, so the variables are partitioned.
        feature_column.sparse_column_with_hash_bucket(
            'language', hash_bucket_size=2e7)
    ]
    embedding_features = [
        feature_column.embedding_column(
            sparse_features[0], dimension=1)
    ]

    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig()
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
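      # (The two fake PS tasks in TF_CONFIG still yield num_ps_replicas=2,
      # which is what makes the partitioner split the large embedding
      # variable.)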
      config._cluster_spec = server_lib.ClusterSpec({})

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=sparse_features,
        dnn_feature_columns=embedding_features,
        dnn_hidden_units=[3, 3],
        config=config)

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    _assert_metrics_in_range(('accuracy', 'auc'), scores)

  def testMultiClass(self):
    """Tests multi-class classification using matrix data as input.

    Please see testLogisticRegression_TensorData() for how to use Tensor
    data as input instead.
    """
    iris = base.load_iris()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        linear_feature_columns=bucketized_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_multiclass_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testMultiClassLabelKeys(self):
    """Tests n_classes > 2 with label_keys vocabulary for labels."""
    # Byte literals needed for python3 test to pass.
    label_keys = [b'label0', b'label1', b'label2']

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[.8], [0.2], [.1]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=input_lib.limit_epochs(
                      ['en', 'fr', 'zh'], num_epochs=num_epochs),
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      labels = constant_op.constant(
          [[label_keys[1]], [label_keys[0]], [label_keys[0]]],
          dtype=dtypes.string)
      return features, labels

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3,
        linear_feature_columns=[language_column],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        label_keys=label_keys)

    classifier.fit(input_fn=_input_fn, steps=50)

    scores = classifier.evaluate(input_fn=_input_fn, steps=1)
    _assert_metrics_in_range(('accuracy',), scores)
    self.assertIn('loss', scores)
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predicted_classes = list(
        classifier.predict_classes(
            input_fn=predict_input_fn, as_iterable=True))
    self.assertEqual(3, len(predicted_classes))
    for pred in predicted_classes:
      self.assertIn(pred, label_keys)
    predictions = list(
        classifier.predict(input_fn=predict_input_fn, as_iterable=True))
    self.assertAllEqual(predicted_classes, predictions)

  def testLoss(self):
    """Tests loss calculation."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The logistic prediction should be (y = 0.25).
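      # (With identical features on every row, the best attainable fit is the
      # base rate: predicting p = 0.25 minimizes the mean cross entropy that
      # the assertion below checks.)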
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      labels = constant_op.constant([[1], [0], [0], [0]])
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=2,
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
    # Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
    self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)

  def testLossWithWeights(self):
    """Tests loss calculation with weights."""

    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them
      # (y=Not(x)). The logistic prediction should be (y = 0.25).
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      return features, labels

    def _input_fn_eval():
      # 4 rows, with different weights.
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[7.], [1.], [1.], [1.]])
      }
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        weight_column_name='w',
        n_classes=2,
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
    self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)

  def testTrainWithWeights(self):
    """Tests training with given weight column."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[100.], [3.], [2.], [2.]])
      }
      return features, labels

    def _input_fn_eval():
      # Create 4 rows (y = x).
      labels = constant_op.constant([[1], [1], [1], [1]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        weight_column_name='w',
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn_train, steps=100)
    scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByObject(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByString(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer='Ftrl',
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer='Adagrad')

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testCustomOptimizerByFunction(self):
    """Tests binary classification using matrix data as input."""
    iris = test_data.prepare_iris_data_for_logistic_regression()
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]
    bucketized_features = [
        feature_column.bucketized_column(
            cont_features[0],
            test_data.get_quantile_based_buckets(iris.data, 10))
    ]

    def _optimizer_exp_decay():
      global_step = training_util.get_global_step()
      learning_rate = learning_rate_decay.exponential_decay(
          learning_rate=0.1,
          global_step=global_step,
          decay_steps=100,
          decay_rate=0.001)
      return adagrad.AdagradOptimizer(learning_rate=learning_rate)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=bucketized_features,
        linear_optimizer=_optimizer_exp_decay,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        dnn_optimizer=_optimizer_exp_decay)

    classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=100)
    _assert_metrics_in_range(('accuracy',), scores)

  def testPredict(self):
    """Tests predict_proba and predict_classes output."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
      return features, labels

    def _input_fn_predict():
      y = input_lib.limit_epochs(
          array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
      features = {'x': y}
      return features

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=100)

    probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
    self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
    classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
    self.assertListEqual([0] * 4, classes)

  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""

    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1], [0], [0], [0]])
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels

    def _my_metric_op(predictions, labels):
      # For the case of binary classification, the 2nd column of "predictions"
      # denotes the model predictions.
      labels = math_ops.to_float(labels)
      predictions = array_ops.strided_slice(
          predictions, [0, 1], [-1, 2], end_mask=1)
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn, steps=100)
    scores = classifier.evaluate(
        input_fn=_input_fn,
        steps=100,
        metrics={
            'my_accuracy':
                MetricSpec(
                    metric_fn=metric_ops.streaming_accuracy,
                    prediction_key='classes'),
            'my_precision':
                MetricSpec(
                    metric_fn=metric_ops.streaming_precision,
                    prediction_key='classes'),
            'my_metric':
                MetricSpec(
                    metric_fn=_my_metric_op, prediction_key='probabilities')
        })
    self.assertTrue(
        set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
            set(scores.keys())))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(classifier.predict_classes(
        input_fn=predict_input_fn)))
    self.assertEqual(
        _sklearn.accuracy_score([1, 0, 0, 0], predictions),
        scores['my_accuracy'])

    # Test the case where the 2nd element of the key is neither "classes" nor
    # "probabilities".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})

    # Test the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              ('bad_length_name', 'classes', 'bad_length'):
                  metric_ops.streaming_accuracy
          })

    # Test the case where the prediction_key is neither "classes" nor
    # "probabilities".
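    # (MetricSpec looks prediction_key up in the predictions dict produced by
    # the head; 'bad_type' is not such a key, hence the KeyError below.)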
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      classifier.evaluate(
          input_fn=_input_fn,
          steps=100,
          metrics={
              'bad_name':
                  MetricSpec(
                      metric_fn=metric_ops.streaming_auc,
                      prediction_key='bad_type')
          })

  def testVariableQuery(self):
    """Tests get_variable_names and get_variable_value."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=500)
    var_names = classifier.get_variable_names()
    self.assertGreater(len(var_names), 3)
    for name in var_names:
      classifier.get_variable_value(name)

  def testExport(self):
    """Tests export model for servo."""

    def input_fn():
      return {
          'age':
              constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[
            feature_column.real_valued_column('age'),
            language,
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language, dimension=1),
        ],
        dnn_hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=100)

    export_dir = tempfile.mkdtemp()
    input_feature_key = 'examples'

    def serving_input_fn():
      features, targets = input_fn()
      features[input_feature_key] = array_ops.placeholder(dtypes.string)
      return features, targets

    classifier.export(
        export_dir,
        serving_input_fn,
        input_feature_key,
        use_deprecated_input_fn=False)

  def testCenteredBias(self):
    """Tests bias is centered or not."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        enable_centered_bias=True)

    classifier.fit(input_fn=_input_fn_train, steps=1000)
    self.assertIn('binary_logistic_head/centered_bias_weight',
                  classifier.get_variable_names())
    # logodds(0.75) = 1.09861228867
    self.assertAlmostEqual(
        1.0986,
        float(classifier.get_variable_value(
            'binary_logistic_head/centered_bias_weight')[0]),
        places=2)

  def testDisableCenteredBias(self):
    """Tests bias is centered or not."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        enable_centered_bias=False)

    classifier.fit(input_fn=_input_fn_train, steps=500)
    self.assertNotIn('centered_bias_weight', classifier.get_variable_names())

  def testGlobalStepLinearOnly(self):
    """Tests global step update for linear-only model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])

    self.assertEqual(100, step_counter.steps)

  def testGlobalStepDNNOnly(self):
    """Tests global step update for dnn-only model."""

    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3])
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])

    self.assertEqual(100, step_counter.steps)

  def testGlobalStepDNNLinearCombinedBug(self):
    """Tests global step update for dnn-linear combined model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language],
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3],
        fix_global_step_increment_bug=False)
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
    global_step = classifier.get_variable_value('global_step')

    if global_step == 100:
      # Expected is 100, but because of the global step increment bug, is 50.
      self.assertEqual(50, step_counter.steps)
    else:
      # Occasionally, training stops when global_step == 101, due to a race
      # condition.
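      # With the bug, each hook-counted step advances global_step twice (once
      # per optimizer), so the extra increment leaves the hook at 51 steps.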
      self.assertEqual(51, step_counter.steps)

  def testGlobalStepDNNLinearCombinedBugFixed(self):
    """Tests global step update for dnn-linear combined model."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 10)
    age = feature_column.real_valued_column('age')

    step_counter = _StepCounterHook()
    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language],
        dnn_feature_columns=[
            feature_column.embedding_column(language, dimension=1)],
        dnn_hidden_units=[3, 3],
        fix_global_step_increment_bug=True)
    classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
    self.assertEqual(100, step_counter.steps)

  def testLinearOnly(self):
    """Tests that linear-only instantiation works."""

    def input_fn():
      return {
          'age': constant_op.constant([1]),
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 100)
    age = feature_column.real_valued_column('age')

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[age, language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)

    variable_names = classifier.get_variable_names()
    self.assertNotIn('dnn/logits/biases', variable_names)
    self.assertNotIn('dnn/logits/weights', variable_names)
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/age/weight', variable_names)
    self.assertIn('linear/language/weights', variable_names)
    self.assertEquals(
        1, len(classifier.get_variable_value('linear/age/weight')))
    self.assertEquals(
        100, len(classifier.get_variable_value('linear/language/weights')))

  def testLinearOnlyOneFeature(self):
    """Tests that linear-only instantiation works for one feature only."""

    def input_fn():
      return {
          'language':
              sparse_tensor.SparseTensor(
                  values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
      }, constant_op.constant([[1]])

    language = feature_column.sparse_column_with_hash_bucket('language', 99)

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[language])
    classifier.fit(input_fn=input_fn, steps=100)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=200)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)

    variable_names = classifier.get_variable_names()
    self.assertNotIn('dnn/logits/biases', variable_names)
    self.assertNotIn('dnn/logits/weights', variable_names)
    self.assertIn('linear/bias_weight', variable_names)
    self.assertIn('linear/language/weights', variable_names)
    self.assertEquals(
        1, len(classifier.get_variable_value('linear/bias_weight')))
    self.assertEquals(
        99, len(classifier.get_variable_value('linear/language/weights')))

  def testDNNOnly(self):
    """Tests that DNN-only instantiation works."""
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
    classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)

    variable_names = classifier.get_variable_names()
    self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
    self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
    self.assertIn('dnn/logits/weights', variable_names)
    self.assertIn('dnn/logits/biases', variable_names)
    self.assertNotIn('linear/bias_weight', variable_names)
    self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)

  def testDNNWeightsBiasesNames(self):
    """Tests the names of DNN weights and biases in the checkpoints."""

    def _input_fn_train():
      # Create 4 rows, three (y = x), one (y=Not(x))
      labels = constant_op.constant([[1], [1], [1], [0]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3])

    classifier.fit(input_fn=_input_fn_train, steps=5)
    variable_names = classifier.get_variable_names()
    self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
    self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
    self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
    self.assertIn('dnn/logits/weights', variable_names)
    self.assertIn('dnn/logits/biases', variable_names)


class DNNLinearCombinedRegressorTest(test.TestCase):

  def testExperimentIntegration(self):
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    exp = experiment.Experiment(
        estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
            linear_feature_columns=cont_features,
            dnn_feature_columns=cont_features,
            dnn_hidden_units=[3, 3]),
        train_input_fn=test_data.iris_input_logistic_fn,
        eval_input_fn=test_data.iris_input_logistic_fn)
    exp.test()

  def testEstimatorContract(self):
    estimator_test_utils.assert_estimator_contract(
        self, dnn_linear_combined.DNNLinearCombinedRegressor)

  def testRegression_MatrixData(self):
    """Tests regression using matrix data as input."""
    cont_features = [feature_column.real_valued_column('feature', dimension=4)]

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=cont_features,
        dnn_feature_columns=cont_features,
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
    scores = regressor.evaluate(
        input_fn=test_data.iris_input_logistic_fn, steps=1)
    self.assertIn('loss', scores.keys())

  def testRegression_TensorData(self):
    """Tests regression using tensor data as input."""

    def _input_fn():
      # Create 4 rows of (y = x)
      labels = constant_op.constant([[100.], [3.], [2.], [2.]])
      features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
      return features, labels

    classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    classifier.fit(input_fn=_input_fn, steps=10)
    classifier.evaluate(input_fn=_input_fn, steps=1)

  def testLoss(self):
    """Tests loss calculation."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # The algorithm should learn (y = 0.25).
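      # (With identical features, the squared-error minimizer is the label
      # mean 0.25, giving loss (0.75**2 + 3 * 0.25**2) / 4 = 0.1875, which is
      # asserted below.)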
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
      return features, labels

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
    # Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
    self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)

  def testLossWithWeights(self):
    """Tests loss calculation with weights."""

    def _input_fn_train():
      # 4 rows with equal weight, one of them (y = x), three of them
      # (y=Not(x)). The algorithm should learn (y = 0.25).
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels

    def _input_fn_eval():
      # 4 rows, with different weights.
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[7.], [1.], [1.], [1.]])
      }
      return features, labels

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        weight_column_name='w',
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
    self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)

  def testTrainWithWeights(self):
    """Tests training with given weight column."""

    def _input_fn_train():
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      # First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relative higher weight of the first row.
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[100.], [3.], [2.], [2.]])
      }
      return features, labels

    def _input_fn_eval():
      # Create 4 rows (y = x)
      labels = constant_op.constant([[1.], [1.], [1.], [1.]])
      features = {
          'x': array_ops.ones(
              shape=[4, 1], dtype=dtypes.float32),
          'w': constant_op.constant([[1.], [1.], [1.], [1.]])
      }
      return features, labels

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        weight_column_name='w',
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn_train, steps=100)
    scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
    # The model should learn (y = x) because of the weights, so the loss
    # should be close to zero.
    self.assertLess(scores['loss'], 0.2)

  def testPredict_AsIterableFalse(self):
    """Tests predict method with as_iterable=False."""
    labels = [1., 0., 0.2]

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(labels, dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=10)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())
    regressor.predict_scores(input_fn=_input_fn, as_iterable=False)

  def testPredict_AsIterable(self):
    """Tests predict method with as_iterable=True."""
    labels = [1., 0., 0.2]

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(labels, dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=10)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)

  def testCustomMetrics(self):
    """Tests custom evaluation metrics."""

    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels

    def _my_metric_op(predictions, labels):
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=10)
    scores = regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'my_error': metric_ops.streaming_mean_squared_error,
            ('my_metric', 'scores'): _my_metric_op
        })
    self.assertIn('loss', set(scores.keys()))
    self.assertIn('my_error', set(scores.keys()))
    self.assertIn('my_metric', set(scores.keys()))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(regressor.predict_scores(
        input_fn=predict_input_fn)))
    self.assertAlmostEqual(
        _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
        scores['my_error'])

    # Tests the case that the 2nd element of the key is not "scores".
    with self.assertRaises(KeyError):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              ('my_error', 'predictions'):
                  metric_ops.streaming_mean_squared_error
          })

    # Tests the case where the tuple of the key doesn't have 2 elements.
    with self.assertRaises(ValueError):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              ('bad_length_name', 'scores', 'bad_length'):
                  metric_ops.streaming_mean_squared_error
          })

  def testCustomMetricsWithMetricSpec(self):
    """Tests custom evaluation metrics."""

    def _input_fn(num_epochs=None):
      # Create 4 rows, one of them (y = x), three of them (y=Not(x))
      labels = constant_op.constant([[1.], [0.], [0.], [0.]])
      features = {
          'x':
              input_lib.limit_epochs(
                  array_ops.ones(
                      shape=[4, 1], dtype=dtypes.float32),
                  num_epochs=num_epochs)
      }
      return features, labels

    def _my_metric_op(predictions, labels):
      return math_ops.reduce_sum(math_ops.multiply(predictions, labels))

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=5)
    scores = regressor.evaluate(
        input_fn=_input_fn,
        steps=1,
        metrics={
            'my_error':
                MetricSpec(
                    metric_fn=metric_ops.streaming_mean_squared_error,
                    prediction_key='scores'),
            'my_metric':
                MetricSpec(
                    metric_fn=_my_metric_op, prediction_key='scores')
        })
    self.assertIn('loss', set(scores.keys()))
    self.assertIn('my_error', set(scores.keys()))
    self.assertIn('my_metric', set(scores.keys()))
    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    predictions = np.array(list(regressor.predict_scores(
        input_fn=predict_input_fn)))
    self.assertAlmostEqual(
        _sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
        scores['my_error'])

    # Tests the case where the prediction_key is not "scores".
    with self.assertRaisesRegexp(KeyError, 'bad_type'):
      regressor.evaluate(
          input_fn=_input_fn,
          steps=1,
          metrics={
              'bad_name':
                  MetricSpec(
                      metric_fn=metric_ops.streaming_auc,
                      prediction_key='bad_type')
          })

  def testExport(self):
    """Tests export model for servo."""
    labels = [1., 0., 0.2]

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(labels, dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
        ],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=10)

    export_dir = tempfile.mkdtemp()
    input_feature_key = 'examples'

    def serving_input_fn():
      features, targets = _input_fn()
      features[input_feature_key] = array_ops.placeholder(dtypes.string)
      return features, targets

    regressor.export(
        export_dir,
        serving_input_fn,
        input_feature_key,
        use_deprecated_input_fn=False)

  def testTrainSaveLoad(self):
    """Tests regression with restarting training / evaluate."""

    def _input_fn(num_epochs=None):
      # Create 4 rows of (y = x)
      labels = constant_op.constant([[100.], [3.], [2.], [2.]])
      features = {
          'x':
              input_lib.limit_epochs(
                  constant_op.constant([[100.], [3.], [2.], [2.]]),
                  num_epochs=num_epochs)
      }
      return features, labels

    model_dir = tempfile.mkdtemp()
    # pylint: disable=g-long-lambda
    new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[feature_column.real_valued_column('x')],
        dnn_feature_columns=[feature_column.real_valued_column('x')],
        dnn_hidden_units=[3, 3],
        model_dir=model_dir,
        config=run_config.RunConfig(tf_random_seed=1))

    predict_input_fn = functools.partial(_input_fn, num_epochs=1)
    regressor = new_regressor()
    regressor.fit(input_fn=_input_fn, steps=10)
    predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
    del regressor

    regressor = new_regressor()
    predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))

    self.assertAllClose(predictions, predictions2)

  def testTrainWithPartitionedVariables(self):
    """Tests training with partitioned variables."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1., 0., 0.2], dtype=dtypes.float32)

    # The given hash_bucket_size results in variables larger than the
    # default min_slice_size attribute, so the variables are partitioned.
    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=2e7)

    tf_config = {
        'cluster': {
            run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
        }
    }
    with test.mock.patch.dict('os.environ',
                              {'TF_CONFIG': json.dumps(tf_config)}):
      config = run_config.RunConfig(tf_random_seed=1)
      # Because we did not start a distributed cluster, we need to pass an
      # empty ClusterSpec, otherwise the device_setter will look for
      # distributed jobs, such as "/job:ps" which are not present.
      config._cluster_spec = server_lib.ClusterSpec({})

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        config=config)

    regressor.fit(input_fn=_input_fn, steps=100)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())

  def testDisableCenteredBias(self):
    """Tests that we can disable centered bias."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1., 0., 0.2], dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        enable_centered_bias=False,
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=100)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())

  def testLinearOnly(self):
    """Tests linear-only instantiation and training."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1., 0., 0.2], dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[
            language_column, feature_column.real_valued_column('age')
        ],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=100)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())

  def testDNNOnly(self):
    """Tests DNN-only instantiation and training."""

    def _input_fn(num_epochs=None):
      features = {
          'age':
              input_lib.limit_epochs(
                  constant_op.constant([[0.8], [0.15], [0.]]),
                  num_epochs=num_epochs),
          'language':
              sparse_tensor.SparseTensor(
                  values=['en', 'fr', 'zh'],
                  indices=[[0, 0], [0, 1], [2, 0]],
                  dense_shape=[3, 2])
      }
      return features, constant_op.constant(
          [1., 0., 0.2], dtype=dtypes.float32)

    language_column = feature_column.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=20)

    regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
        dnn_feature_columns=[
            feature_column.embedding_column(
                language_column, dimension=1),
            feature_column.real_valued_column('age')
        ],
        dnn_hidden_units=[3, 3],
        config=run_config.RunConfig(tf_random_seed=1))

    regressor.fit(input_fn=_input_fn, steps=100)

    scores = regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertIn('loss', scores.keys())


class FeatureEngineeringFunctionTest(test.TestCase):
  """Tests feature_engineering_fn."""

  def testNoneFeatureEngineeringFn(self):

    def input_fn():
      # Create 4 rows of (y = x)
      labels = constant_op.constant([[100.], [3.], [2.], [2.]])
      features = {'x':
constant_op.constant([[100.], [3.], [2.], [2.]])} return features, labels def feature_engineering_fn(features, labels): _, _ = features, labels labels = constant_op.constant([[1000.], [30.], [20.], [20.]]) features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])} return features, labels estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1), feature_engineering_fn=feature_engineering_fn) estimator_with_fe_fn.fit(input_fn=input_fn, steps=110) estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor( linear_feature_columns=[feature_column.real_valued_column('x')], dnn_feature_columns=[feature_column.real_valued_column('x')], dnn_hidden_units=[3, 3], config=run_config.RunConfig(tf_random_seed=1)) estimator_without_fe_fn.fit(input_fn=input_fn, steps=110) # predictions = y prediction_with_fe_fn = next( estimator_with_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0) prediction_without_fe_fn = next( estimator_without_fe_fn.predict_scores( input_fn=input_fn, as_iterable=True)) self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0) if __name__ == '__main__': test.main()
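
# ---------------------------------------------------------------------------
# Illustrative sketch (not one of the original tests): the smallest
# wide-and-deep setup the tests above exercise, reduced to its core. It only
# uses APIs already imported in this module; the toy feature values, label
# values and step counts are arbitrary.
# ---------------------------------------------------------------------------
def _minimal_wide_and_deep_example():
  """Fit a DNNLinearCombinedRegressor on a single dense feature."""

  def _toy_input_fn():
    features = {'x': constant_op.constant([[1.], [2.], [3.]])}
    labels = constant_op.constant([[2.], [4.], [6.]])
    return features, labels

  x_column = feature_column.real_valued_column('x')
  regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
      linear_feature_columns=[x_column],  # the "wide" (linear) part
      dnn_feature_columns=[x_column],     # the "deep" (DNN) part
      dnn_hidden_units=[3, 3],
      config=run_config.RunConfig(tf_random_seed=1))
  regressor.fit(input_fn=_toy_input_fn, steps=10)
  return regressor.predict_scores(input_fn=_toy_input_fn, as_iterable=False)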
apache-2.0
shanzhenren/ClusType
src/algorithm.py
1
10630
from collections import defaultdict from operator import itemgetter from math import log, sqrt import random as rn import time from numpy import * # install numpy from scipy import * # install scipy from numpy.linalg import norm import numpy.linalg as npl from scipy.sparse import * import scipy.sparse.linalg as spsl from sklearn.preprocessing import normalize ### install from http://scikit-learn.org/stable/ def create_matrix(size_row, size_col): return csr_matrix((size_row, size_col)) def create_dense_matrix(size_row, size_col): return mat(zeros((size_row, size_col))) def set_Y(train_mid, seedMention_tid_score, mid_mention, size_row, size_col): row = [] col = [] val = [] num_NIL = 0 num_target = 0 NIL_set = set() for mid in train_mid: # in training set mention = mid_mention[mid] if mention in seedMention_tid_score: # in ground truth tid = seedMention_tid_score[mention][0] score = seedMention_tid_score[mention][1] if tid == (size_col - 1): # NIL num_NIL += 1 # NIL_set.add((mid, tid, score)) NIL_set.add((mid, tid, 1.0)) else: num_target += 1 row.append(mid) col.append(tid) # val.append(score) val.append(1.0) if num_target < 1: print 'No target type entity seeded!!!!' ### random sample NIL examples # neg_size = num_NIL neg_size = min(num_NIL, 5*num_target) # neg_size = int(min(num_NIL, num_target/(size_col-1.0))) neg_example = rn.sample(NIL_set, neg_size) for entry in neg_example: row.append(entry[0]) col.append(entry[1]) val.append(entry[2]) Y = coo_matrix((val, (row, col)), shape = (size_row, size_col)).tocsr() # print Y.nnz, '#ground truth mentions in Y' print 'Percent Seeded Mention:', (Y.nnz+0.0)/len(mid_mention) * 100, '% of', len(mid_mention), \ ', #target/All = ', num_target/(Y.nnz+0.0) * 100 return Y def update_Y_closed_form(S_M, Y, Y0, Theta, PiC, gamma, mu): # row = [] # col = [] # val = [] for j in range(PiC.shape[1]): # for each candidate j, slicing to get submatrix mid_list = PiC[:, j].nonzero()[0].tolist() Y0_j = Y0[mid_list, :] Theta_j = Theta[mid_list, :] S_M_j = S_M[mid_list, :][:, mid_list] if S_M_j.shape[0] * S_M_j.shape[1] < 2520800000: # transform to dense matrix tmp = ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j).todense() Y_j = npl.inv(tmp) * (Theta_j + mu*Y0_j) Y[mid_list, :] = Y_j # # sparse # Yc = spsl.inv((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j) * (Theta_j + mu*Y0_j) # Yc = spsl.spsolve( ((1+gamma+mu)*identity(len(mid_list)) - gamma*S_M_j), (Theta_j + mu*Y0_j) ) # row_idx, col_idx = Yc.nonzero() # for i in range(len(row_idx)): # mid = mid_list[row_idx[i]] # row.append(mid) # col.append(col_idx[i]) # val.append(Yc[row_idx[i], col_idx[i]]) if j % 1000 == 0: print 'candidate', j # Y = coo_matrix((val, (row, col)), shape = Y0.shape).tocsr() return Y def inverse_matrix(X): X.data[:] = 1/(X.data) return X def clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, K): PiLL = PiL.T*PiL PiRR = PiR.T*PiR ### initialization ############################################################# m = PiC.shape[0] n, l = S_L.shape C = create_dense_matrix(n, T) PL = create_dense_matrix(l, T) PR = create_dense_matrix(l, T) Y = Y0.copy() Theta = PiC*C + PiL*PL + PiR*PR obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \ lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y)) ### Start algorithm ############################################################# for i in range(ITER): lambda4 = 1+gamma+mu Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0) C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + 
lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) ) PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR)) PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL)) obj_old = obj Theta = PiC*C + PiL*PL + PiR*PR obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \ lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y)) if (i+1) % 10 == 0: print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old # Y = PiC*C # Y = PiL*PL + PiR*PR Y = PiC*C + PiL*PL + PiR*PR return (Y, C, PL, PR) def clustype_noClus_inner(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER, tol, C, PL, PR, Y): PiLL = PiL.T*PiL PiRR = PiR.T*PiR ### initialization ############################################################# m = PiC.shape[0] n, l = S_L.shape Theta = PiC*C + PiL*PL + PiR*PR obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \ lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y)) ### Start algorithm ############################################################# for i in range(ITER): lambda4 = 1+gamma+mu Y = 1/lambda4 * (gamma*S_M*Y + Theta + mu*Y0) C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*(Y-PiL*PL-PiR*PR) ) PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * (S_L.T*C + lambda_O*PiL.T*(Y-PiC*C-PiR*PR)) PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * (S_R.T*C + lambda_O*PiR.T*(Y-PiC*C-PiL*PL)) obj_old = obj Theta = PiC*C + PiL*PL + PiR*PR obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \ lambda_O * (norm(Y-Theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y)) rel = abs(obj_old - obj)/obj_old if (i+1) % 10 == 0: print '\tClusType_noClus_inner Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old if rel < tol: print ' ClusType_noClus_inner Converges!' Y = PiC*C + PiL*PL + PiR*PR return (Y, C, PL, PR) # Y = PiC*C # Y = PiL*PL + PiR*PR Y = PiC*C + PiL*PL + PiR*PR print ' ClusType_noClus_inner Reach MaxIter!' 
    return (Y, C, PL, PR)


def clustype_noClus_PiLR(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
    ### pre-computation #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    PiLL = PiL.T*PiL # l-by-l
    PiRR = PiR.T*PiR # l-by-l

    ### initialization #############################################################
    C = create_dense_matrix(n, T)
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Y = Y0.copy()
    theta = PiL*PL + PiR*PR
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O*(norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        Y = 1/lambda4 * (gamma*S_M*Y + theta + mu*Y0)
        C = 1/2.0 * ( S_L*PL + S_R*PR )
        PL = inverse_matrix(identity(PiL.shape[1]) + lambda_O*PiLL) * lambda_O*PiL.T*(Y-PiR*PR)
        PR = inverse_matrix(identity(PiR.shape[1]) + lambda_O*PiRR) * lambda_O*PiR.T*(Y-PiL*PL)

        obj_old = obj
        theta = PiL*PL + PiR*PR
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-theta,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

        if (i+1) % 10 == 0:
            print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiL*PL + PiR*PR
    return Y


def clustype_noClus_PiC(S_L, S_R, S_M, PiC, PiL, PiR, Y0, lambda_O, gamma, mu, T, ITER):
    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    C = create_dense_matrix(n, T)
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Y = Y0.copy()
    obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
        lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

    ### Start algorithm #############################################################
    for i in range(ITER):
        lambda4 = 1+gamma+mu
        Y = 1/lambda4 * (gamma*S_M*Y + PiC*C + mu*Y0)
        C = 1/(2+lambda_O) * ( S_L*PL + S_R*PR + lambda_O*PiC.T*Y )
        PL = S_L.T*C
        PR = S_R.T*C

        obj_old = obj
        obj = trace(2*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR) + \
            lambda_O * (norm(Y-PiC*C,ord='fro')**2 + mu*norm(Y-Y0,ord='fro')**2 + gamma*trace(Y.T*Y-Y.T*S_M*Y))

        if (i+1) % 10 == 0:
            print 'iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiC*C
    return Y


def clustype_onlycandidate(S_L, S_R, PiC, PiL, PiR, Y0, T, ITER):
    ### pre-computation #############################################################
    u = 0.5

    ### initialization #############################################################
    m = PiC.shape[0]
    n, l = S_L.shape
    C0 = PiC.T * Y0
    C = C0.copy()
    PL = create_dense_matrix(l, T)
    PR = create_dense_matrix(l, T)
    Theta = PiC*C + PiL*PL + PiR*PR
    obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)

    ### Start algorithm #############################################################
    for i in range(ITER):
        C = 1/(2+u) * (S_L*PL + S_R*PR + u*C0)
        PL = S_L.T*C
        PR = S_R.T*C

        obj_old = obj
        obj = trace((2+u)*C.T*C + PL.T*PL + PR.T*PR - 2*C.T*S_L*PL - 2*C.T*S_R*PR - 2*u*C.T*C0 + u*C0.T*C0)

        if (i+1) % 10 == 0:
            print 'ClusType_Cand Iter', i+1, 'obj: ', obj, 'rel obj change: ', (obj_old-obj)/obj_old

    Y = PiC*C
    return (Y, C, PL, PR)
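
# ---------------------------------------------------------------------------
# Toy driver (illustrative sketch, not part of the original pipeline): run
# clustype_appx on tiny random inputs with consistent shapes. Conventions
# follow the functions above: m mentions, n candidates, l relation phrases,
# T types. All sizes and hyper-parameters below are arbitrary assumptions,
# and Y0 is taken dense for simplicity; the real pipeline builds its inputs
# from normalized similarity matrices and set_Y().
# ---------------------------------------------------------------------------
def _toy_run():
    import numpy.random as npr
    m, n, l, T = 8, 5, 4, 3
    S_L = csr_matrix(npr.rand(n, l))   # candidate-to-left-phrase similarity
    S_R = csr_matrix(npr.rand(n, l))   # candidate-to-right-phrase similarity
    S_M = csr_matrix(npr.rand(m, m))   # mention-mention similarity graph
    PiC = csr_matrix(npr.rand(m, n))   # mention-to-candidate projection
    PiL = csr_matrix(npr.rand(m, l))   # mention-to-left-phrase projection
    PiR = csr_matrix(npr.rand(m, l))   # mention-to-right-phrase projection
    Y0 = mat(npr.rand(m, T))           # dense seed label matrix (assumption)
    return clustype_appx(S_L, S_R, S_M, PiC, PiL, PiR, Y0,
                         lambda_O=1.0, gamma=0.5, mu=0.5, T=T, ITER=20, K=0)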
gpl-3.0
eg-zhang/scikit-learn
benchmarks/bench_mnist.py
76
6136
""" ======================= MNIST dataset benchmark ======================= Benchmark on the MNIST dataset. The dataset comprises 70,000 samples and 784 features. Here, we consider the task of predicting 10 classes - digits from 0 to 9 from their raw images. By contrast to the covertype dataset, the feature space is homogenous. Example of output : [..] Classification performance: =========================== Classifier train-time test-time error-rat ------------------------------------------------------------ Nystroem-SVM 105.07s 0.91s 0.0227 ExtraTrees 48.20s 1.22s 0.0288 RandomForest 47.17s 1.21s 0.0304 SampledRBF-SVM 140.45s 0.84s 0.0486 CART 22.84s 0.16s 0.1214 dummy 0.01s 0.02s 0.8973 """ from __future__ import division, print_function # Author: Issam H. Laradji # Arnaud Joly <arnaud.v.joly@gmail.com> # License: BSD 3 clause import os from time import time import argparse import numpy as np from sklearn.datasets import fetch_mldata from sklearn.datasets import get_data_home from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.dummy import DummyClassifier from sklearn.externals.joblib import Memory from sklearn.kernel_approximation import Nystroem from sklearn.kernel_approximation import RBFSampler from sklearn.metrics import zero_one_loss from sklearn.pipeline import make_pipeline from sklearn.svm import LinearSVC from sklearn.tree import DecisionTreeClassifier from sklearn.utils import check_array from sklearn.linear_model import LogisticRegression # Memoize the data extraction and memory map the resulting # train / test splits in readonly mode memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'), mmap_mode='r') @memory.cache def load_data(dtype=np.float32, order='F'): """Load the data, then cache and memmap the train/test split""" ###################################################################### ## Load dataset print("Loading dataset...") data = fetch_mldata('MNIST original') X = check_array(data['data'], dtype=dtype, order=order) y = data["target"] # Normalize features X = X / 255 ## Create train-test split (as [Joachims, 2006]) print("Creating train-test split...") n_train = 60000 X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] return X_train, X_test, y_train, y_test ESTIMATORS = { "dummy": DummyClassifier(), 'CART': DecisionTreeClassifier(), 'ExtraTrees': ExtraTreesClassifier(n_estimators=100), 'RandomForest': RandomForestClassifier(n_estimators=100), 'Nystroem-SVM': make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'SampledRBF-SVM': make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)), 'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4) } if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--classifiers', nargs="+", choices=ESTIMATORS, type=str, default=['ExtraTrees', 'Nystroem-SVM'], help="list of classifiers to benchmark.") parser.add_argument('--n-jobs', nargs="?", default=1, type=int, help="Number of concurrently running workers for " "models that support parallelism.") parser.add_argument('--order', nargs="?", default="C", type=str, choices=["F", "C"], help="Allow to choose between fortran and C ordered " "data") parser.add_argument('--random-seed', nargs="?", default=0, type=int, help="Common seed used by random number generator.") args = vars(parser.parse_args()) print(__doc__) X_train, X_test, y_train, y_test = load_data(order=args["order"]) print("") 
print("Dataset statistics:") print("===================") print("%s %d" % ("number of features:".ljust(25), X_train.shape[1])) print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size)) print("%s %s" % ("data type:".ljust(25), X_train.dtype)) print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25), X_train.shape[0], int(X_train.nbytes / 1e6))) print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25), X_test.shape[0], int(X_test.nbytes / 1e6))) print() print("Training Classifiers") print("====================") error, train_time, test_time = {}, {}, {} for name in sorted(args["classifiers"]): print("Training %s ... " % name, end="") estimator = ESTIMATORS[name] estimator_params = estimator.get_params() estimator.set_params(**{p: args["random_seed"] for p in estimator_params if p.endswith("random_state")}) if "n_jobs" in estimator_params: estimator.set_params(n_jobs=args["n_jobs"]) time_start = time() estimator.fit(X_train, y_train) train_time[name] = time() - time_start time_start = time() y_pred = estimator.predict(X_test) test_time[name] = time() - time_start error[name] = zero_one_loss(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print("{0: <24} {1: >10} {2: >11} {3: >12}" "".format("Classifier ", "train-time", "test-time", "error-rate")) print("-" * 60) for name in sorted(args["classifiers"], key=error.get): print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}" "".format(name, train_time[name], test_time[name], error[name])) print()
bsd-3-clause
RPGOne/Skynet
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_iris_logistic.py
283
1678
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================

Shown below are the decision boundaries of a logistic-regression
classifier on the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_
dataset. The datapoints are colored according to their labels.

"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
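
# ---------------------------------------------------------------------------
# Optional follow-up (illustrative, not part of the original example): the
# fitted classifier can also report its training accuracy and per-class
# probabilities; the sample measurement below is an arbitrary sepal
# length/width pair.
# ---------------------------------------------------------------------------
print("Training accuracy: %.3f" % logreg.score(X, Y))
print("Class probabilities for a sepal of (5.0, 3.5):")
print(logreg.predict_proba([[5.0, 3.5]]))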
bsd-3-clause
clemkoa/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
39
7489
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.), edgecolor='k') plt.xlabel("Squared distances rate: 
projected / original") plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
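
# ---------------------------------------------------------------------------
# Quick numeric check of the bound discussed in the docstring (illustrative):
# minimum safe dimensionality for the 500-sample case at a few distortion
# levels, using the same helper already imported above.
# ---------------------------------------------------------------------------
for eps in (0.1, 0.5, 0.99):
    print("eps=%.2f -> n_components >= %d"
          % (eps, johnson_lindenstrauss_min_dim(n_samples=500, eps=eps)))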
bsd-3-clause
magic2du/contact_matrix
Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_01_09_2015_01.py
1
25014
# coding: utf-8 # In[1]: # this part imports libs and load data from csv file import sys sys.path.append('../../../libs/') import csv from dateutil import parser from datetime import timedelta from sklearn import svm import numpy as np import pandas as pd import pickle from sklearn.cross_validation import train_test_split from sklearn import preprocessing import sklearn import scipy.stats as ss import cPickle import gzip import os import time import numpy import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams import os.path import IO_class from IO_class import FileOperator from sklearn import cross_validation import sklearn import numpy as np import csv from dateutil import parser from datetime import timedelta from sklearn import svm import numpy as np import pandas as pd import pdb, PIL import pickle import numpy as np from sklearn.cross_validation import train_test_split from sklearn.cross_validation import KFold from sklearn import preprocessing import sklearn import scipy.stats as ss from sklearn.svm import LinearSVC import random from DL_libs import * from itertools import izip #new import math from sklearn.svm import SVC # In[2]: # set settings for this script settings = {} settings['with_auc_score'] = False settings['reduce_ratio'] = 1 settings['SVM'] = 1 settings['SVM_RBF'] = 1 settings['SVM_POLY'] = 1 settings['DL'] = 1 settings['Log'] = 1 settings['SAE_SVM'] = 1 settings['SAE_SVM_RBF'] = 1 settings['SAE_SVM_POLY'] = 1 settings['DL_S'] = 1 settings['SAE_S_SVM'] = 1 settings['SAE_S_SVM_RBF'] = 1 settings['SAE_S_SVM_POLY'] = 1 settings['number_iterations'] = 10 settings['finetune_lr'] = 0.1 settings['batch_size'] = 30 settings['pretraining_interations'] = 50000#10000 settings['pretrain_lr'] = 0.001 #settings['training_epochs'] = 300 #300 settings['training_interations'] = 50000 #300 settings['hidden_layers_sizes'] = [200, 200, 200, 200, 200] settings['corruption_levels'] = [0.5, 0.5, 0.5, 0.5, 0.5 ] settings['number_of_training'] = [10000]#[1000, 2500, 5000, 7500, 10000] settings['test_set_from_test'] = True import logging import time current_date = time.strftime("%m_%d_%Y") logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logname = 'log_DL_handwritten_digits' + current_date + '.log' handler = logging.FileHandler(logname) handler.setLevel(logging.DEBUG) # create a logging format formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) # add the handlers to the logger logger.addHandler(handler) #logger.debug('This message should go to the log file') for key, value in settings.items(): logger.info(key +': '+ str(value)) # In[3]: f = gzip.open('mnist.pkl.gz', 'rb') train_set, valid_set, test_set = cPickle.load(f) X_train,y_train = train_set X_valid,y_valid = valid_set X_total=np.vstack((X_train, X_valid)) X_total = np.array(X_total, dtype= theano.config.floatX) print'sample size', X_total.shape y_total = np.concatenate([y_train, y_valid]) # In[5]: ################## generate data from training set################### array_A =[] array_B =[] for i in range(100000): array_A.append(np.random.random_integers(0, 59999)) array_B.append(np.random.random_integers(0, 59999)) pos_index = [] neg_index = [] for index in xrange(100000): if y_total[array_A[index]] - y_total[array_B[index]] == 1: pos_index.append(index) else: neg_index.append(index) print 'number of positive examples', len(pos_index) selected_neg_index= neg_index[ : len(pos_index)] array_A = np.array(array_A) 
array_B = np.array(array_B) index_for_positive_image_A = array_A[pos_index] index_for_positive_image_B = array_B[pos_index] index_for_neg_image_A = array_A[selected_neg_index] index_for_neg_image_B = array_B[selected_neg_index] X_pos_A = X_total[index_for_positive_image_A] X_pos_B = X_total[index_for_positive_image_B] X_pos_whole = np.hstack((X_pos_A,X_pos_B)) X_neg_A = X_total[index_for_neg_image_A] X_neg_B = X_total[index_for_neg_image_B] X_neg_whole = np.hstack((X_neg_A, X_neg_B)) print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape X_whole = np.vstack((X_pos_whole, X_neg_whole)) print X_whole.shape y_pos = np.ones(X_pos_whole.shape[0]) y_neg = np.zeros(X_neg_whole.shape[0]) y_whole = np.concatenate([y_pos,y_neg]) print y_whole # In[7]: #pylab.imshow(imageB.reshape(28, 28), cmap="Greys") # In[8]: def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new newfile = False if os.path.isfile('report_' + fname + '.csv'): pass else: newfile = True csvfile = open('report_' + fname + '.csv', 'a+') writer = csv.writer(csvfile) if newfile == True: writer.writerow(['no.', 'number_of_training', 'method', 'isTest']+ score_dict.keys()) #, 'AUC']) for arg in arguments: writer.writerow([i for i in arg]) csvfile.close() def run_models(settings = None): analysis_scr = [] with_auc_score = settings['with_auc_score'] for subset_no in xrange(1,settings['number_iterations']+1): print("Subset:", subset_no) ################## generate data ################### array_A =[] array_B =[] for i in range(100000): array_A.append(np.random.random_integers(0, 59999)) array_B.append(np.random.random_integers(0, 59999)) pos_index = [] neg_index = [] for index in xrange(100000): if y_total[array_A[index]] - y_total[array_B[index]] == 1: pos_index.append(index) else: neg_index.append(index) print 'number of positive examples', len(pos_index) selected_neg_index= neg_index[ : len(pos_index)] array_A = np.array(array_A) array_B = np.array(array_B) index_for_positive_image_A = array_A[pos_index] index_for_positive_image_B = array_B[pos_index] index_for_neg_image_A = array_A[selected_neg_index] index_for_neg_image_B = array_B[selected_neg_index] X_pos_A = X_total[index_for_positive_image_A] X_pos_B = X_total[index_for_positive_image_B] X_pos_whole = np.hstack((X_pos_A,X_pos_B)) X_neg_A = X_total[index_for_neg_image_A] X_neg_B = X_total[index_for_neg_image_B] X_neg_whole = np.hstack((X_neg_A, X_neg_B)) print X_pos_A.shape, X_pos_B.shape, X_pos_whole.shape print X_neg_A.shape, X_neg_B.shape, X_neg_whole.shape X_whole = np.vstack((X_pos_whole, X_neg_whole)) print X_whole.shape y_pos = np.ones(X_pos_whole.shape[0]) y_neg = np.zeros(X_neg_whole.shape[0]) y_whole = np.concatenate([y_pos,y_neg]) print y_whole.shape x_train_pre_validation, x_test, y_train_pre_validation, y_test = train_test_split(X_whole,y_whole, test_size=0.2, random_state=211) for number_of_training in settings['number_of_training']: x_train, x_validation, y_train, y_validation = train_test_split(x_train_pre_validation[:number_of_training], y_train_pre_validation[:number_of_training],\ test_size=0.2, random_state=21) print x_train.shape, y_train.shape, x_validation.shape, y_validation.shape, x_test.shape, y_test.shape x_train_minmax, x_validation_minmax, x_test_minmax = x_train, x_validation, x_test train_X_reduced = x_train train_y_reduced = y_train test_X = x_test test_y = y_test ###original data### ################ end of data #################### standard_scaler = 
preprocessing.StandardScaler().fit(train_X_reduced) scaled_train_X = standard_scaler.transform(train_X_reduced) scaled_test_X = standard_scaler.transform(test_X) if settings['SVM']: print "SVM" Linear_SVC = LinearSVC(C=1, penalty="l2") Linear_SVC.fit(scaled_train_X, y_train) predicted_test_y = Linear_SVC.predict(scaled_test_X) isTest = True; #new analysis_scr.append((subset_no, number_of_training, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = Linear_SVC.predict(scaled_train_X) isTest = False; #new analysis_scr.append(( subset_no,number_of_training, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) if settings['SVM_RBF']: print "SVM_RBF" L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, y_train) predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X) isTest = True; #new analysis_scr.append((subset_no, number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X) isTest = False; #new analysis_scr.append((subset_no,number_of_training, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) if settings['SVM_POLY']: print "SVM_POLY" L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced) predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X) isTest = True; #new analysis_scr.append(( subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X) isTest = False; #new analysis_scr.append((subset_no, number_of_training,'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) if settings['Log']: print "Log" log_clf_l2 = sklearn.linear_model.LogisticRegression(C=1, penalty='l2') log_clf_l2.fit(scaled_train_X, train_y_reduced) predicted_test_y = log_clf_l2.predict(scaled_test_X) isTest = True; #new analysis_scr.append((subset_no,number_of_training, 'Log', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = log_clf_l2.predict(scaled_train_X) isTest = False; #new analysis_scr.append((subset_no, number_of_training,'Log', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) # direct deep learning finetune_lr = settings['finetune_lr'] batch_size = settings['batch_size'] pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size) #pretrain_lr=0.001 pretrain_lr = settings['pretrain_lr'] training_epochs = cal_epochs(settings['training_interations'], x_train_minmax, batch_size = batch_size) hidden_layers_sizes = settings['hidden_layers_sizes'] corruption_levels = settings['corruption_levels'] if settings['DL']: print "direct deep learning" sda = trainSda(x_train_minmax, y_train, x_validation_minmax, y_validation, x_test_minmax, test_y, hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \ training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, pretrain_lr = pretrain_lr, finetune_lr=finetune_lr ) print 'hidden_layers_sizes:', hidden_layers_sizes print 'corruption_levels:', corruption_levels test_predicted = sda.predict(x_test_minmax) isTest = True; #new analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_test, 
test_predicted).values())) training_predicted = sda.predict(x_train_minmax) isTest = False; #new analysis_scr.append((subset_no,number_of_training, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values())) ####transformed original data#### x = train_X_reduced a_MAE_original = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size, hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels) new_x_train_minmax_A = a_MAE_original.transform(train_X_reduced) new_x_test_minmax_A = a_MAE_original.transform(x_test_minmax) standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A) new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A) new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A) new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled)) new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled)) if settings['SAE_SVM']: # SAE_SVM print 'SAE followed by SVM' Linear_SVC = LinearSVC(C=1, penalty="l2") Linear_SVC.fit(new_x_train_scaled, train_y_reduced) predicted_test_y = Linear_SVC.predict(new_x_test_scaled) isTest = True; #new analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = Linear_SVC.predict(new_x_train_scaled) isTest = False; #new analysis_scr.append(( subset_no, number_of_training,'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) if settings['SAE_SVM_RBF']: # SAE_SVM print 'SAE followed by SVM RBF' L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced) predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled) isTest = True; #new analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled) isTest = False; #new analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) if settings['SAE_SVM_POLY']: # SAE_SVM print 'SAE followed by SVM POLY' L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_scaled, train_y_reduced) predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled) isTest = True; #new analysis_scr.append((subset_no, number_of_training,'SAE_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled) isTest = False; #new analysis_scr.append((subset_no, number_of_training, 'SAE_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values())) #### separated transformed data #### y_test = test_y print 'deep learning using split network' # get the new representation for A set. 
first 784-D pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size) x = x_train_minmax[:, :x_train_minmax.shape[1]/2] print "original shape for A", x.shape a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size, hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels) new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2]) x = x_train_minmax[:, x_train_minmax.shape[1]/2:] print "original shape for B", x.shape a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size, hidden_layers_sizes = [x/2 for x in hidden_layers_sizes], corruption_levels=corruption_levels) new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:]) new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2]) new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:]) new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2]) new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:]) new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B)) new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B)) new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B)) standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_whole) new_x_train_minmax_whole_scaled = standard_scaler.transform(new_x_train_minmax_whole) new_x_test_minmax_whole_scaled = standard_scaler.transform(new_x_test_minmax_whole) if settings['DL_S']: # deep learning using split network sda_transformed = trainSda(new_x_train_minmax_whole, y_train, new_x_validationt_minmax_whole, y_validation , new_x_test_minmax_whole, y_test, hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \ training_epochs = training_epochs, pretraining_epochs = pretraining_epochs, pretrain_lr = pretrain_lr, finetune_lr=finetune_lr ) print 'hidden_layers_sizes:', hidden_layers_sizes print 'corruption_levels:', corruption_levels predicted_test_y = sda_transformed.predict(new_x_test_minmax_whole) y_test = test_y isTest = True; #new analysis_scr.append((subset_no, number_of_training,'DL_S', isTest) + tuple(performance_score(y_test, predicted_test_y, with_auc_score).values())) training_predicted = sda_transformed.predict(new_x_train_minmax_whole) isTest = False; #new analysis_scr.append((subset_no,number_of_training, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values())) if settings['SAE_S_SVM']: print 'SAE_S followed by SVM' Linear_SVC = LinearSVC(C=1, penalty="l2") Linear_SVC.fit(new_x_train_minmax_whole_scaled, train_y_reduced) predicted_test_y = Linear_SVC.predict(new_x_test_minmax_whole_scaled) isTest = True; #new analysis_scr.append(( subset_no, number_of_training,'SAE_S_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new predicted_train_y = Linear_SVC.predict(new_x_train_minmax_whole_scaled) isTest = False; #new analysis_scr.append(( subset_no,number_of_training, 'SAE_S_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values())) if settings['SAE_S_SVM_RBF']: print 'SAE S followed by SVM RBF' 
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_minmax_whole_scaled, train_y_reduced) predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled) isTest = True; #new analysis_scr.append((subset_no, number_of_training, 'SAE_S_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled) isTest = False; #new analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values())) if settings['SAE_S_SVM_POLY']: # SAE_SVM print 'SAE S followed by SVM POLY' L1_SVC_RBF_Selector = SVC(C=1, kernel='poly').fit(new_x_train_minmax_whole_scaled, train_y_reduced) predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_minmax_whole_scaled) isTest = True; #new analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y, with_auc_score).values())) #new predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_minmax_whole_scaled) isTest = False; #new analysis_scr.append((subset_no, number_of_training,'SAE_S_SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y, with_auc_score).values())) report_name = 'DL_handwritten_digits' + '_size_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + '_' + str(settings['pretraining_interations']) + '_' + current_date saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr) return sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr # In[9]: sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr = run_models(settings) # In[48]: # save objects sda, a_MAE_original, a_MAE_A, a_MAE_B, analysis_scr with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date +'sda.pickle', 'wb') as handle: pickle.dump(sda, handle) with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_original.pickle', 'wb') as handle: pickle.dump(a_MAE_original, handle) with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_A.pickle', 'wb') as handle: pickle.dump(a_MAE_A, handle) with open('_'.join(map(str, settings['hidden_layers_sizes'])) +'_'.join(map(str, settings['corruption_levels']))+ '_' + current_date + 'a_MAE_B.pickle', 'wb') as handle: pickle.dump(a_MAE_B, handle) x = logging._handlers.copy() for i in x: log.removeHandler(i) i.flush() i.close() # In[ ]: # In[31]: ''' weights_map_to_input_space = [] StackedNNobject = sda image_dimension_x = 28*2 image_dimension_y = 28 if isinstance(StackedNNobject, SdA) or isinstance(StackedNNobject, MultipleAEs): weights_product = StackedNNobject.dA_layers[0].W.get_value(borrow=True) image = PIL.Image.fromarray(tile_raster_images( X=weights_product.T, img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10), tile_spacing=(1, 1))) sample_image_path = 'hidden_0_layer_weights.png' image.save(sample_image_path) weights_map_to_input_space.append(weights_product) for i_layer in range(1, len(StackedNNobject.dA_layers)): i_weigths = StackedNNobject.dA_layers[i_layer].W.get_value(borrow=True) weights_product = np.dot(weights_product, i_weigths) 
weights_map_to_input_space.append(weights_product) image = PIL.Image.fromarray(tile_raster_images( X=weights_product.T, img_shape=(image_dimension_x, image_dimension_y), tile_shape=(10, 10), tile_spacing=(1, 1))) sample_image_path = 'hidden_'+ str(i_layer)+ '_layer_weights.png' image.save(sample_image_path) ''' # In[18]:
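
# ---------------------------------------------------------------------------
# Condensed sketch of the pair-construction idea used above (illustrative,
# not called by the script): sample index pairs, keep a pair as positive
# when label(A) - label(B) == 1, and truncate the negatives to balance the
# classes. `y` is assumed to be a NumPy integer label vector; the draw count
# and seed are arbitrary.
# ---------------------------------------------------------------------------
def make_pairs(y, n_draws=1000, seed=0):
    rng = np.random.RandomState(seed)
    a = rng.randint(0, len(y), n_draws)  # high is exclusive, unlike the
    b = rng.randint(0, len(y), n_draws)  # inclusive random_integers above
    is_pos = (y[a] - y[b]) == 1
    pos = zip(a[is_pos], b[is_pos])
    neg = zip(a[~is_pos], b[~is_pos])[:len(pos)]  # balance the classes
    return pos, neg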
gpl-2.0
rvraghav93/scikit-learn
sklearn/feature_extraction/tests/test_text.py
8
35969
from __future__ import unicode_literals import warnings from sklearn.feature_extraction.text import strip_tags from sklearn.feature_extraction.text import strip_accents_unicode from sklearn.feature_extraction.text import strip_accents_ascii from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.model_selection import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.base import clone import numpy as np from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_raises from sklearn.utils.testing import (assert_equal, assert_false, assert_true, assert_not_equal, assert_almost_equal, assert_in, assert_less, assert_greater, assert_warns_message, assert_raise_message, clean_warning_registry, SkipTest) from collections import defaultdict, Mapping from functools import partial import pickle from io import StringIO JUNK_FOOD_DOCS = ( "the pizza pizza beer copyright", "the pizza burger beer copyright", "the the pizza beer beer copyright", "the burger beer beer copyright", "the coke burger coke copyright", "the coke burger burger", ) NOTJUNK_FOOD_DOCS = ( "the salad celeri copyright", "the salad salad sparkling water copyright", "the the celeri celeri copyright", "the tomato tomato salad water", "the tomato salad water copyright", ) ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS def uppercase(s): return strip_accents_unicode(s).upper() def strip_eacute(s): return s.replace('\xe9', 'e') def split_tokenize(s): return s.split() def lazy_analyze(s): return ['the_ultimate_feature'] def test_strip_accents(): # check some classical latin accentuated symbols a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb' expected = 'aaaaaaceeee' assert_equal(strip_accents_unicode(a), expected) a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd' expected = 'iiiinooooouuuuy' assert_equal(strip_accents_unicode(a), expected) # check some arabic a = '\u0625' # halef with a hamza below expected = '\u0627' # simple halef assert_equal(strip_accents_unicode(a), expected) # mix letters accentuated and not a = "this is \xe0 test" expected = 'this is a test' assert_equal(strip_accents_unicode(a), expected) def test_to_ascii(): # check some classical latin accentuated symbols a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb' expected = 'aaaaaaceeee' assert_equal(strip_accents_ascii(a), expected) a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd' expected = 'iiiinooooouuuuy' assert_equal(strip_accents_ascii(a), expected) # check some arabic a = '\u0625' # halef with a hamza below expected = '' # halef has no direct ascii match assert_equal(strip_accents_ascii(a), expected) # mix letters accentuated and not a = "this is \xe0 test" expected = 'this is a test' assert_equal(strip_accents_ascii(a), expected) def test_word_analyzer_unigrams(): for Vectorizer in (CountVectorizer, HashingVectorizer): wa = Vectorizer(strip_accents='ascii').build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " "c'\xe9tait pas tr\xeas bon.") expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi', 'etait', 'pas', 'tres', 'bon'] 
assert_equal(wa(text), expected) text = "This is a test, really.\n\n I met Harry yesterday." expected = ['this', 'is', 'test', 'really', 'met', 'harry', 'yesterday'] assert_equal(wa(text), expected) wa = Vectorizer(input='file').build_analyzer() text = StringIO("This is a test with a file-like object!") expected = ['this', 'is', 'test', 'with', 'file', 'like', 'object'] assert_equal(wa(text), expected) # with custom preprocessor wa = Vectorizer(preprocessor=uppercase).build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " " c'\xe9tait pas tr\xeas bon.") expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI', 'ETAIT', 'PAS', 'TRES', 'BON'] assert_equal(wa(text), expected) # with custom tokenizer wa = Vectorizer(tokenizer=split_tokenize, strip_accents='ascii').build_analyzer() text = ("J'ai mang\xe9 du kangourou ce midi, " "c'\xe9tait pas tr\xeas bon.") expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,', "c'etait", 'pas', 'tres', 'bon.'] assert_equal(wa(text), expected) def test_word_analyzer_unigrams_and_bigrams(): wa = CountVectorizer(analyzer="word", strip_accents='unicode', ngram_range=(1, 2)).build_analyzer() text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon." expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi', 'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du', 'du kangourou', 'kangourou ce', 'ce midi', 'midi etait', 'etait pas', 'pas tres', 'tres bon'] assert_equal(wa(text), expected) def test_unicode_decode_error(): # decode_error default to strict, so this should fail # First, encode (as bytes) a unicode string. text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon." text_bytes = text.encode('utf-8') # Then let the Analyzer try to decode it as ascii. It should fail, # because we have given it an incorrect encoding. 
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer() assert_raises(UnicodeDecodeError, wa, text_bytes) ca = CountVectorizer(analyzer='char', ngram_range=(3, 6), encoding='ascii').build_analyzer() assert_raises(UnicodeDecodeError, ca, text_bytes) def test_char_ngram_analyzer(): cnga = CountVectorizer(analyzer='char', strip_accents='unicode', ngram_range=(3, 6)).build_analyzer() text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon" expected = ["j'a", "'ai", 'ai ', 'i m', ' ma'] assert_equal(cnga(text)[:5], expected) expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon'] assert_equal(cnga(text)[-5:], expected) text = "This \n\tis a test, really.\n\n I met Harry yesterday" expected = ['thi', 'his', 'is ', 's i', ' is'] assert_equal(cnga(text)[:5], expected) expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday'] assert_equal(cnga(text)[-5:], expected) cnga = CountVectorizer(input='file', analyzer='char', ngram_range=(3, 6)).build_analyzer() text = StringIO("This is a test with a file-like object!") expected = ['thi', 'his', 'is ', 's i', ' is'] assert_equal(cnga(text)[:5], expected) def test_char_wb_ngram_analyzer(): cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode', ngram_range=(3, 6)).build_analyzer() text = "This \n\tis a test, really.\n\n I met Harry yesterday" expected = [' th', 'thi', 'his', 'is ', ' thi'] assert_equal(cnga(text)[:5], expected) expected = ['yester', 'esterd', 'sterda', 'terday', 'erday '] assert_equal(cnga(text)[-5:], expected) cnga = CountVectorizer(input='file', analyzer='char_wb', ngram_range=(3, 6)).build_analyzer() text = StringIO("A test with a file-like object!") expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes'] assert_equal(cnga(text)[:6], expected) def test_countvectorizer_custom_vocabulary(): vocab = {"pizza": 0, "beer": 1} terms = set(vocab.keys()) # Try a few of the supported types. 
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))


def test_countvectorizer_custom_vocabulary_pipeline():
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))


def test_countvectorizer_custom_vocabulary_repeated_indices():
    vocab = {"pizza": 0, "beer": 0}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())


def test_countvectorizer_custom_vocabulary_gap_index():
    vocab = {"pizza": 1, "beer": 2}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())


def test_countvectorizer_stop_words():
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))


def test_countvectorizer_empty_vocabulary():
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())

    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())


def test_fit_countvectorizer_twice():
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])


def test_tf_idf_smoothing():
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())

    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])

    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())


def test_tfidf_no_smoothing():
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())

    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])

    # the lack of smoothing makes IDF fragile in the presence of features
    # with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')

    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1

    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")


def test_sublinear_tf():
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)


def test_vectorizer():
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1

    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)

    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)

    # compare that the two vectorizers give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()

        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)

        # stop word from the fixed list
        assert_false("the" in vocabulary)

        # stop word found automatically by the vectorizer DF thresholding
        # words that are highly frequent across the complete corpus are
        # likely to be uninformative (either real stop words or extraction
        # artifacts)
        assert_false("copyright" in vocabulary)

        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)

    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))

    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))

    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_false(hasattr(t2, "idf_"))

    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)

    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)

    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)

    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')

    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)

    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)

    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError,
                  v3.transform, train_data)

    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)

    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)

    # error with bad analyzer type
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)


def test_tfidf_vectorizer_setters():
    tv = TfidfVectorizer(norm='l2', use_idf=False,
                         smooth_idf=False, sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)


def test_hashing_vectorizer():
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)

    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)

    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)

    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)

    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)

    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)

    # Check that the rows are normalized
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)


def test_feature_names():
    cv = CountVectorizer(max_df=0.5)

    # test for ValueError on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)

    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)

    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)

    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))


def test_vectorizer_max_features():
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )

    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])

    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)


def test_count_vectorizer_max_features():
    # Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1) cv_3 = CountVectorizer(max_features=3) cv_None = CountVectorizer(max_features=None) counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0) features_1 = cv_1.get_feature_names() features_3 = cv_3.get_feature_names() features_None = cv_None.get_feature_names() # The most common feature is "the", with frequency 7. assert_equal(7, counts_1.max()) assert_equal(7, counts_3.max()) assert_equal(7, counts_None.max()) # The most common feature should be the same assert_equal("the", features_1[np.argmax(counts_1)]) assert_equal("the", features_3[np.argmax(counts_3)]) assert_equal("the", features_None[np.argmax(counts_None)]) def test_vectorizer_max_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', max_df=1.0) vect.fit(test_data) assert_true('a' in vect.vocabulary_.keys()) assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5 vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain assert_true('a' in vect.stop_words_) assert_equal(len(vect.stop_words_), 2) vect.max_df = 1 vect.fit(test_data) assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain assert_true('a' in vect.stop_words_) assert_equal(len(vect.stop_words_), 2) def test_vectorizer_min_df(): test_data = ['abc', 'dea', 'eat'] vect = CountVectorizer(analyzer='char', min_df=1) vect.fit(test_data) assert_true('a' in vect.vocabulary_.keys()) assert_equal(len(vect.vocabulary_.keys()), 6) assert_equal(len(vect.stop_words_), 0) vect.min_df = 2 vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain assert_true('c' in vect.stop_words_) assert_equal(len(vect.stop_words_), 4) vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4 vect.fit(test_data) assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains assert_true('c' in vect.stop_words_) assert_equal(len(vect.stop_words_), 5) def test_count_binary_occurrences(): # by default multiple occurrences are counted as longs test_data = ['aaabc', 'abbde'] vect = CountVectorizer(analyzer='char', max_df=1.0) X = vect.fit_transform(test_data).toarray() assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names()) assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X) # using boolean features, we can fetch the binary occurrence info # instead. 
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True) X = vect.fit_transform(test_data).toarray() assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X) # check the ability to change the dtype vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True, dtype=np.float32) X_sparse = vect.fit_transform(test_data) assert_equal(X_sparse.dtype, np.float32) def test_hashed_binary_occurrences(): # by default multiple occurrences are counted as longs test_data = ['aaabc', 'abbde'] vect = HashingVectorizer(analyzer='char', non_negative=True, norm=None) X = vect.transform(test_data) assert_equal(np.max(X[0:1].data), 3) assert_equal(np.max(X[1:2].data), 2) assert_equal(X.dtype, np.float64) # using boolean features, we can fetch the binary occurrence info # instead. vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True, norm=None) X = vect.transform(test_data) assert_equal(np.max(X.data), 1) assert_equal(X.dtype, np.float64) # check the ability to change the dtype vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True, norm=None, dtype=np.float64) X = vect.transform(test_data) assert_equal(X.dtype, np.float64) def test_vectorizer_inverse_transform(): # raw documents data = ALL_FOOD_DOCS for vectorizer in (TfidfVectorizer(), CountVectorizer()): transformed_data = vectorizer.fit_transform(data) inversed_data = vectorizer.inverse_transform(transformed_data) analyze = vectorizer.build_analyzer() for doc, inversed_terms in zip(data, inversed_data): terms = np.sort(np.unique(analyze(doc))) inversed_terms = np.sort(np.unique(inversed_terms)) assert_array_equal(terms, inversed_terms) # Test that inverse_transform also works with numpy arrays transformed_data = transformed_data.toarray() inversed_data2 = vectorizer.inverse_transform(transformed_data) for terms, terms2 in zip(inversed_data, inversed_data2): assert_array_equal(np.sort(terms), np.sort(terms2)) def test_count_vectorizer_pipeline_grid_selection(): # raw documents data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS # label junk food as -1, the others as +1 target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS) # split the dataset for model development and final evaluation train_data, test_data, target_train, target_test = train_test_split( data, target, test_size=.2, random_state=0) pipeline = Pipeline([('vect', CountVectorizer()), ('svc', LinearSVC())]) parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], 'svc__loss': ('hinge', 'squared_hinge') } # find the best parameters for both the feature extraction and the # classifier grid_search = GridSearchCV(pipeline, parameters, n_jobs=1) # Check that the best model found by grid search is 100% correct on the # held out evaluation set. 
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # on this toy dataset all the candidate models converge to 100% accuracy,
    # so the first configuration tried (the unigram representation) is kept
    # as the best estimator
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))


def test_vectorizer_pipeline_grid_selection():
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])

    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # on this toy dataset all the candidate models converge to 100% accuracy,
    # so the first configuration tried (the unigram representation) is kept
    # as the best estimator
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)


def test_vectorizer_pipeline_cross_validation():
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])

    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])


def test_vectorizer_unicode():
    # tests that the count vectorizer works with Cyrillic.
document = ( "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0" "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0" "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd" "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7" "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81" "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3" "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0" "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87" "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82" "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80" "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3" "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81" "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 " "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1" "\x8f.") vect = CountVectorizer() X_counted = vect.fit_transform([document]) assert_equal(X_counted.shape, (1, 15)) vect = HashingVectorizer(norm=None, non_negative=True) X_hashed = vect.transform([document]) assert_equal(X_hashed.shape, (1, 2 ** 20)) # No collisions on such a small dataset assert_equal(X_counted.nnz, X_hashed.nnz) # When norm is None and non_negative, the tokens are counted up to # collisions assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data)) def test_tfidf_vectorizer_with_fixed_vocabulary(): # non regression smoke test for inheritance issues vocabulary = ['pizza', 'celeri'] vect = TfidfVectorizer(vocabulary=vocabulary) X_1 = vect.fit_transform(ALL_FOOD_DOCS) X_2 = vect.transform(ALL_FOOD_DOCS) assert_array_almost_equal(X_1.toarray(), X_2.toarray()) assert_true(vect.fixed_vocabulary_) def test_pickling_vectorizer(): instances = [ HashingVectorizer(), HashingVectorizer(norm='l1'), HashingVectorizer(binary=True), HashingVectorizer(ngram_range=(1, 2)), CountVectorizer(), CountVectorizer(preprocessor=strip_tags), CountVectorizer(analyzer=lazy_analyze), CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS), TfidfVectorizer(), TfidfVectorizer(analyzer=lazy_analyze), TfidfVectorizer().fit(JUNK_FOOD_DOCS), ] for orig in instances: s = pickle.dumps(orig) copy = pickle.loads(s) assert_equal(type(copy), orig.__class__) assert_equal(copy.get_params(), orig.get_params()) assert_array_equal( copy.fit_transform(JUNK_FOOD_DOCS).toarray(), orig.fit_transform(JUNK_FOOD_DOCS).toarray()) def test_countvectorizer_vocab_sets_when_pickling(): # ensure that vocabulary of type set is coerced to a list to # preserve iteration ordering after deserialization rng = np.random.RandomState(0) vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water']) for x in range(0, 100): vocab_set = set(rng.choice(vocab_words, size=5, replace=False)) cv = CountVectorizer(vocabulary=vocab_set) unpickled_cv = pickle.loads(pickle.dumps(cv)) cv.fit(ALL_FOOD_DOCS) unpickled_cv.fit(ALL_FOOD_DOCS) assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names()) def test_countvectorizer_vocab_dicts_when_pickling(): rng = np.random.RandomState(0) vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza', 'salad', 'sparkling', 'tomato', 'water']) for x in range(0, 100): vocab_dict = dict() words = rng.choice(vocab_words, size=5, replace=False) for y in range(0, 5): vocab_dict[words[y]] = y cv = CountVectorizer(vocabulary=vocab_dict) 
unpickled_cv = pickle.loads(pickle.dumps(cv)) cv.fit(ALL_FOOD_DOCS) unpickled_cv.fit(ALL_FOOD_DOCS) assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names()) def test_stop_words_removal(): # Ensure that deleting the stop_words_ attribute doesn't affect transform fitted_vectorizers = ( TfidfVectorizer().fit(JUNK_FOOD_DOCS), CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS), CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS) ) for vect in fitted_vectorizers: vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray() vect.stop_words_ = None stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray() delattr(vect, 'stop_words_') stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray() assert_array_equal(stop_None_transform, vect_transform) assert_array_equal(stop_del_transform, vect_transform) def test_pickling_transformer(): X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS) orig = TfidfTransformer().fit(X) s = pickle.dumps(orig) copy = pickle.loads(s) assert_equal(type(copy), orig.__class__) assert_array_equal( copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray()) def test_non_unique_vocab(): vocab = ['a', 'b', 'c', 'a', 'a'] vect = CountVectorizer(vocabulary=vocab) assert_raises(ValueError, vect.fit, []) def test_hashingvectorizer_nan_in_docs(): # np.nan can appear when using pandas to load text fields from a csv file # with missing values. message = "np.nan is an invalid document, expected byte or unicode string." exception = ValueError def func(): hv = HashingVectorizer() hv.fit_transform(['hello world', np.nan, 'hello hello']) assert_raise_message(exception, message, func) def test_tfidfvectorizer_binary(): # Non-regression test: TfidfVectorizer used to ignore its "binary" param. v = TfidfVectorizer(binary=True, use_idf=False, norm=None) assert_true(v.binary) X = v.fit_transform(['hello world', 'hello hello']).toarray() assert_array_equal(X.ravel(), [1, 1, 1, 0]) X2 = v.transform(['hello world', 'hello hello']).toarray() assert_array_equal(X2.ravel(), [1, 1, 1, 0]) def test_tfidfvectorizer_export_idf(): vect = TfidfVectorizer(use_idf=True) vect.fit(JUNK_FOOD_DOCS) assert_array_almost_equal(vect.idf_, vect._tfidf.idf_) def test_vectorizer_vocab_clone(): vect_vocab = TfidfVectorizer(vocabulary=["the"]) vect_vocab_clone = clone(vect_vocab) vect_vocab.fit(ALL_FOOD_DOCS) vect_vocab_clone.fit(ALL_FOOD_DOCS) assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_) def test_vectorizer_string_object_as_input(): message = ("Iterable over raw text documents expected, " "string object received.") for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]: assert_raise_message( ValueError, message, vec.fit_transform, "hello world!") assert_raise_message( ValueError, message, vec.fit, "hello world!") assert_raise_message( ValueError, message, vec.transform, "hello world!")
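

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): a minimal,
# self-contained example of the CountVectorizer / TfidfTransformer round trip
# that the tests above exercise. The toy corpus is hypothetical, and the
# leading underscore keeps test runners from collecting this function.
def _example_vectorizer_round_trip():
    from sklearn.feature_extraction.text import (CountVectorizer,
                                                 TfidfTransformer)
    corpus = ["the pizza beer", "the salad water"]
    vect = CountVectorizer(stop_words='english')
    counts = vect.fit_transform(corpus)  # sparse term-count matrix
    # each row of the l2-normalized tf-idf matrix has unit norm, as
    # test_tf_idf_smoothing checks above
    tfidf = TfidfTransformer(norm='l2').fit_transform(counts)
    return sorted(vect.vocabulary_), tfidf.toarray()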
bsd-3-clause
allinpaybusiness/ACS
allinpay projects/creditscoreMLP/classMLP.py
1
9585
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import sys
import os
sys.path.append("allinpay projects")
from creditscore.creditscore import CreditScore
import numpy as np
import pandas as pd
import time
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor


class CreditScoreMLP(CreditScore):

    def MLP_trainandtest(self, testsize, cv, feature_sel, varthreshold,
                         activation, solver, alpha, max_iter=1000,
                         nclusters=10, cmethod=None, *hidden_layer_sizes):

        # split the dataset into training and test sets
        data_feature = self.data.loc[:, self.data.columns != 'default']
        data_target = self.data['default']
        X_train, X_test, y_train, y_test = train_test_split(
            data_feature, data_target, test_size=testsize, random_state=0)

        # coarse-bin the training set variables and apply the WOE transform,
        # then apply the same binning and WOE transform to the test set
        X_train, X_test = self.binandwoe_traintest(X_train, y_train, X_test,
                                                   nclusters, cmethod)

        # feature selection on the training set, using methods from
        # sklearn.feature_selection
        if feature_sel == "VarianceThreshold":
            selector = VarianceThreshold(threshold=varthreshold)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "RFECV":
            estimator = LogisticRegression()
            selector = RFECV(estimator, step=1, cv=cv)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectFromModel":
            estimator = LogisticRegression()
            selector = SelectFromModel(estimator)
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        elif feature_sel == "SelectKBest":
            selector = SelectKBest()
            X_train1 = pd.DataFrame(selector.fit_transform(X_train, y_train))
            X_train1.columns = X_train.columns[selector.get_support(True)]
            X_test1 = X_test[X_train1.columns]
        else:
            X_train1, X_test1 = X_train, X_test

        # train the model and predict
        classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes,
                                   activation=activation, solver=solver,
                                   alpha=alpha, max_iter=max_iter)
        # to guard against a single neural network training run that fails to
        # converge, train 10 times and use the average of the 10 predicted
        # probabilities
        probability = 0
        for i in range(10):
            # fit the model
            classifier.fit(X_train1, y_train)
            # accumulate the predicted probabilities
            probability += classifier.predict_proba(X_test1)[:, 1]
        probability = probability / 10

        predresult = pd.DataFrame({'target': y_test,
                                   'probability': probability})

        return predresult

    def MLP_trainandtest_kfold(self, nsplit, cv, feature_sel, varthreshold,
                               activation, solver, alpha, max_iter=1000,
                               nclusters=10, cmethod=None,
                               *hidden_layer_sizes):

        data_feature = self.data.loc[:, self.data.columns != 'default']
        data_target = self.data['default']

        # split the dataset into k folds; each fold in turn serves as the
        # test set while the remaining data form the training set
        kf = KFold(n_splits=nsplit, shuffle=True)

        predresult = pd.DataFrame()
        for train_index, test_index in kf.split(data_feature):
            X_train, X_test = (data_feature.iloc[train_index],
                               data_feature.iloc[test_index])
            y_train, y_test = (data_target.iloc[train_index],
                               data_target.iloc[test_index])

            # if random sampling leaves only one class in train or test,
            # skip this round of prediction
            if (len(y_train.unique()) == 1) or (len(y_test.unique()) == 1):
                continue

            # coarse-bin the training set variables and apply the WOE
            # transform, then apply the same binning and WOE transform to
            # the test set
            X_train, X_test = self.binandwoe_traintest(X_train, y_train,
                                                       X_test, nclusters,
                                                       cmethod)

            # feature selection on the training set, using methods from
            # sklearn.feature_selection
            if feature_sel == "VarianceThreshold":
                selector = VarianceThreshold(threshold=varthreshold)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "RFECV":
                estimator = LogisticRegression()
                selector = RFECV(estimator, step=1, cv=cv)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train,
                                                               y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "SelectFromModel":
                estimator = LogisticRegression()
                selector = SelectFromModel(estimator)
                X_train1 = pd.DataFrame(selector.fit_transform(X_train,
                                                               y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            elif feature_sel == "SelectKBest":
                selector = SelectKBest()
                X_train1 = pd.DataFrame(selector.fit_transform(X_train,
                                                               y_train))
                X_train1.columns = X_train.columns[selector.get_support(True)]
                X_test1 = X_test[X_train1.columns]
            else:
                X_train1, X_test1 = X_train, X_test

            # train the model and predict
            classifier = MLPClassifier(hidden_layer_sizes=hidden_layer_sizes,
                                       activation=activation, solver=solver,
                                       alpha=alpha, max_iter=max_iter)
            # to guard against a single neural network training run that
            # fails to converge, train 10 times and use the average of the
            # 10 predicted probabilities
            probability = 0
            for i in range(10):
                # fit the model
                classifier.fit(X_train1, y_train)
                # accumulate the predicted probabilities
                probability += classifier.predict_proba(X_test1)[:, 1]
            probability = probability / 10

            temp = pd.DataFrame({'target': y_test,
                                 'probability': probability})
            predresult = pd.concat([predresult, temp], ignore_index=True)

        return predresult

    def loopMLP_trainandtest(self, testsize, cv, feature_sel, varthreshold,
                             activation, solver, alpha, max_iter=1000,
                             nclusters=10, cmethod=None):
        df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of hidden neurons
            hidden_layer_sizes = (i,)
            # split into train/test and evaluate
            predresult = self.MLP_trainandtest(testsize, cv, feature_sel,
                                               varthreshold, activation,
                                               solver, alpha, max_iter,
                                               nclusters, cmethod,
                                               *hidden_layer_sizes)
            # evaluate and store the test results
            auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
            temp = pd.DataFrame({'hidden_first_layer': i, 'auc_value': auc,
                                 'ks_value': ks,
                                 'p0=0.5': metrics_p['accuracy'][5]},
                                index=[0])
            df = pd.concat([df, temp], ignore_index=False)
            print('num %s complete' % i)
        time0 = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
        exist = os.path.exists('d:/ACS_CSVS')
        if exist:
            df.to_csv('d:/ACS_CSVS/' + time0 + '_MLP.csv', index=False,
                      sep=',')
        else:
            os.makedirs('d:/ACS_CSVS/')
            df.to_csv('d:/ACS_CSVS/' + time0 + '_MLP.csv', index=False,
                      sep=',')

    def loopMLP_trainandtest_kfold(self, testsize, cv, feature_sel,
                                   varthreshold, activation, solver, alpha,
                                   max_iter=1000, nclusters=10, cmethod=None):
        df = pd.DataFrame()
        for i in range(3, 101, 3):  # loop over the number of hidden neurons
            hidden_layer_sizes = (i,)
            # split into k folds and evaluate
            predresult = self.MLP_trainandtest_kfold(testsize, cv,
                                                     feature_sel,
                                                     varthreshold, activation,
                                                     solver, alpha, max_iter,
                                                     nclusters, cmethod,
                                                     *hidden_layer_sizes)
            # evaluate and store the test results
            auc, ks, metrics_p = self.loopmodelmetrics_scores(predresult)
            temp = pd.DataFrame({'hidden_first_layer': i, 'auc_value': auc,
                                 'ks_value': ks,
                                 'p0=0.5': metrics_p['accuracy'][5]},
                                index=[0])
            df = pd.concat([df, temp], ignore_index=False)
            print('num %s complete' % i)
        time0 = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
        exist = os.path.exists('d:/ACS_CSVS')
        if exist:
            df.to_csv('d:/ACS_CSVS/' + time0 + '_MLP.csv', index=False,
                      sep=',')
        else:
            os.makedirs('d:/ACS_CSVS/')
            df.to_csv('d:/ACS_CSVS/' + time0 + '_MLP.csv', index=False,
                      sep=',')
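

# Illustrative, self-contained sketch (not part of the original project) of
# the repeated-fit averaging idiom used by MLP_trainandtest above: averaging
# predict_proba over several fits guards against a single run that fails to
# converge. The synthetic dataset below is hypothetical.
def _demo_averaged_mlp_probabilities():
    from sklearn.datasets import make_classification
    from sklearn.model_selection import train_test_split
    from sklearn.neural_network import MLPClassifier
    X, y = make_classification(n_samples=400, n_features=10, random_state=0)
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3,
                                              random_state=0)
    clf = MLPClassifier(hidden_layer_sizes=(16,), max_iter=1000)
    probability = 0
    for _ in range(10):
        clf.fit(X_tr, y_tr)  # each fit starts from a new random init
        probability += clf.predict_proba(X_te)[:, 1]
    return probability / 10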
apache-2.0
CVML/scikit-learn
examples/model_selection/plot_underfitting_overfitting.py
230
2649
""" ============================ Underfitting vs. Overfitting ============================ This example demonstrates the problems of underfitting and overfitting and how we can use linear regression with polynomial features to approximate nonlinear functions. The plot shows the function that we want to approximate, which is a part of the cosine function. In addition, the samples from the real function and the approximations of different models are displayed. The models have polynomial features of different degrees. We can see that a linear function (polynomial with degree 1) is not sufficient to fit the training samples. This is called **underfitting**. A polynomial of degree 4 approximates the true function almost perfectly. However, for higher degrees the model will **overfit** the training data, i.e. it learns the noise of the training data. We evaluate quantitatively **overfitting** / **underfitting** by using cross-validation. We calculate the mean squared error (MSE) on the validation set, the higher, the less likely the model generalizes correctly from the training data. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.pipeline import Pipeline from sklearn.preprocessing import PolynomialFeatures from sklearn.linear_model import LinearRegression from sklearn import cross_validation np.random.seed(0) n_samples = 30 degrees = [1, 4, 15] true_fun = lambda X: np.cos(1.5 * np.pi * X) X = np.sort(np.random.rand(n_samples)) y = true_fun(X) + np.random.randn(n_samples) * 0.1 plt.figure(figsize=(14, 5)) for i in range(len(degrees)): ax = plt.subplot(1, len(degrees), i + 1) plt.setp(ax, xticks=(), yticks=()) polynomial_features = PolynomialFeatures(degree=degrees[i], include_bias=False) linear_regression = LinearRegression() pipeline = Pipeline([("polynomial_features", polynomial_features), ("linear_regression", linear_regression)]) pipeline.fit(X[:, np.newaxis], y) # Evaluate the models using crossvalidation scores = cross_validation.cross_val_score(pipeline, X[:, np.newaxis], y, scoring="mean_squared_error", cv=10) X_test = np.linspace(0, 1, 100) plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model") plt.plot(X_test, true_fun(X_test), label="True function") plt.scatter(X, y, label="Samples") plt.xlabel("x") plt.ylabel("y") plt.xlim((0, 1)) plt.ylim((-2, 2)) plt.legend(loc="best") plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format( degrees[i], -scores.mean(), scores.std())) plt.show()
bsd-3-clause
ky822/scikit-learn
examples/decomposition/plot_kernel_pca.py
353
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
DSLituiev/scikit-learn
examples/plot_johnson_lindenstrauss_bound.py
8
7473
r""" ===================================================================== The Johnson-Lindenstrauss bound for embedding with random projections ===================================================================== The `Johnson-Lindenstrauss lemma`_ states that any high dimensional dataset can be randomly projected into a lower dimensional Euclidean space while controlling the distortion in the pairwise distances. .. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma Theoretical bounds ================== The distortion introduced by a random projection `p` is asserted by the fact that `p` is defining an eps-embedding with good probability as defined by: .. math:: (1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2 Where u and v are any rows taken from a dataset of shape [n_samples, n_features] and p is a projection by a random Gaussian N(0, 1) matrix with shape [n_components, n_features] (or a sparse Achlioptas matrix). The minimum number of components to guarantees the eps-embedding is given by: .. math:: n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3) The first plot shows that with an increasing number of samples ``n_samples``, the minimal number of dimensions ``n_components`` increased logarithmically in order to guarantee an ``eps``-embedding. The second plot shows that an increase of the admissible distortion ``eps`` allows to reduce drastically the minimal number of dimensions ``n_components`` for a given number of samples ``n_samples`` Empirical validation ==================== We validate the above bounds on the digits dataset or on the 20 newsgroups text document (TF-IDF word frequencies) dataset: - for the digits dataset, some 8x8 gray level pixels data for 500 handwritten digits pictures are randomly projected to spaces for various larger number of dimensions ``n_components``. - for the 20 newsgroups dataset some 500 documents with 100k features in total are projected using a sparse random matrix to smaller euclidean spaces with various values for the target number of dimensions ``n_components``. The default dataset is the digits dataset. To run the example on the twenty newsgroups dataset, pass the --twenty-newsgroups command line argument to this script. For each value of ``n_components``, we plot: - 2D distribution of sample pairs with pairwise distances in original and projected spaces as x and y axis respectively. - 1D histogram of the ratio of those distances (projected / original). We can see that for low values of ``n_components`` the distribution is wide with many distorted pairs and a skewed distribution (due to the hard limit of zero ratio on the left as distances are always positives) while for larger values of n_components the distortion is controlled and the distances are well preserved by the random projection. Remarks ======= According to the JL lemma, projecting 500 samples without too much distortion will require at least several thousands dimensions, irrespective of the number of features of the original dataset. Hence using random projections on the digits dataset which only has 64 features in the input space does not make sense: it does not allow for dimensionality reduction in this case. On the twenty newsgroups on the other hand the dimensionality can be decreased from 56436 down to 10000 while reasonably preserving pairwise distances. 
""" print(__doc__) import sys from time import time import numpy as np import matplotlib.pyplot as plt from sklearn.random_projection import johnson_lindenstrauss_min_dim from sklearn.random_projection import SparseRandomProjection from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.datasets import load_digits from sklearn.metrics.pairwise import euclidean_distances # Part 1: plot the theoretical dependency between n_components_min and # n_samples # range of admissible distortions eps_range = np.linspace(0.1, 0.99, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range))) # range of number of samples (observation) to embed n_samples_range = np.logspace(1, 9, 9) plt.figure() for eps, color in zip(eps_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps) plt.loglog(n_samples_range, min_n_components, color=color) plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right") plt.xlabel("Number of observations to eps-embed") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components") # range of admissible distortions eps_range = np.linspace(0.01, 0.99, 100) # range of number of samples (observation) to embed n_samples_range = np.logspace(2, 6, 5) colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range))) plt.figure() for n_samples, color in zip(n_samples_range, colors): min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range) plt.semilogy(eps_range, min_n_components, color=color) plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right") plt.xlabel("Distortion eps") plt.ylabel("Minimum number of dimensions") plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps") # Part 2: perform sparse random projection of some digits images which are # quite low dimensional and dense or documents of the 20 newsgroups dataset # which is both high dimensional and sparse if '--twenty-newsgroups' in sys.argv: # Need an internet connection hence not enabled by default data = fetch_20newsgroups_vectorized().data[:500] else: data = load_digits().data[:500] n_samples, n_features = data.shape print("Embedding %d samples with dim %d using various random projections" % (n_samples, n_features)) n_components_range = np.array([300, 1000, 10000]) dists = euclidean_distances(data, squared=True).ravel() # select only non-identical samples pairs nonzero = dists != 0 dists = dists[nonzero] for n_components in n_components_range: t0 = time() rp = SparseRandomProjection(n_components=n_components) projected_data = rp.fit_transform(data) print("Projected %d samples from %d to %d in %0.3fs" % (n_samples, n_features, n_components, time() - t0)) if hasattr(rp, 'components_'): n_bytes = rp.components_.data.nbytes n_bytes += rp.components_.indices.nbytes print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6)) projected_dists = euclidean_distances( projected_data, squared=True).ravel()[nonzero] plt.figure() plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu) plt.xlabel("Pairwise squared distances in original space") plt.ylabel("Pairwise squared distances in projected space") plt.title("Pairwise distances distribution for n_components=%d" % n_components) cb = plt.colorbar() cb.set_label('Sample pairs counts') rates = projected_dists / dists print("Mean distances rate: %0.2f (%0.2f)" % (np.mean(rates), np.std(rates))) plt.figure() plt.hist(rates, bins=50, normed=True, range=(0., 2.)) plt.xlabel("Squared distances rate: projected / original") 
plt.ylabel("Distribution of samples pairs") plt.title("Histogram of pairwise distance rates for n_components=%d" % n_components) # TODO: compute the expected value of eps and add them to the previous plot # as vertical lines / region plt.show()
bsd-3-clause
bigdataelephants/scikit-learn
sklearn/datasets/tests/test_lfw.py
50
6849
"""This test for the LFW require medium-size data dowloading and processing If the data has not been already downloaded by running the examples, the tests won't run (skipped). If the test are run, the first execution will be long (typically a bit more than a couple of minutes) but as the dataset loader is leveraging joblib, successive runs will be fast (less than 200ms). """ import random import os import shutil import tempfile import numpy as np from sklearn.externals import six try: try: from scipy.misc import imsave except ImportError: from scipy.misc.pilutil import imsave except ImportError: imsave = None from sklearn.datasets import load_lfw_pairs from sklearn.datasets import load_lfw_people from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import SkipTest from sklearn.utils.testing import raises SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_") SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_") LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home') FAKE_NAMES = [ 'Abdelatif_Smith', 'Abhati_Kepler', 'Camara_Alvaro', 'Chen_Dupont', 'John_Lee', 'Lin_Bauman', 'Onur_Lopez', ] def setup_module(): """Test fixture run once and common to all tests of this module""" if imsave is None: raise SkipTest("PIL not installed.") if not os.path.exists(LFW_HOME): os.makedirs(LFW_HOME) random_state = random.Random(42) np_rng = np.random.RandomState(42) # generate some random jpeg files for each person counts = {} for name in FAKE_NAMES: folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name) if not os.path.exists(folder_name): os.makedirs(folder_name) n_faces = np_rng.randint(1, 5) counts[name] = n_faces for i in range(n_faces): file_path = os.path.join(folder_name, name + '_%04d.jpg' % i) uniface = np_rng.randint(0, 255, size=(250, 250, 3)) try: imsave(file_path, uniface) except ImportError: raise SkipTest("PIL not installed") # add some random file pollution to test robustness with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f: f.write(six.b('Text file to be ignored by the dataset loader.')) # generate some pairing metadata files using the same format as LFW with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f: f.write(six.b("10\n")) more_than_two = [name for name, count in six.iteritems(counts) if count >= 2] for i in range(5): name = random_state.choice(more_than_two) first, second = random_state.sample(range(counts[name]), 2) f.write(six.b('%s\t%d\t%d\n' % (name, first, second))) for i in range(5): first_name, second_name = random_state.sample(FAKE_NAMES, 2) first_index = random_state.choice(np.arange(counts[first_name])) second_index = random_state.choice(np.arange(counts[second_name])) f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index, second_name, second_index))) with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f: f.write(six.b("Fake place holder that won't be tested")) def teardown_module(): """Test fixture (clean up) run once after all tests of this module""" if os.path.isdir(SCIKIT_LEARN_DATA): shutil.rmtree(SCIKIT_LEARN_DATA) if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA): shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA) @raises(IOError) def test_load_empty_lfw_people(): load_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA) def test_load_fake_lfw_people(): lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA, 
                                 min_faces_per_person=3)

    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_people.images.shape, (10, 62, 47))
    assert_equal(lfw_people.data.shape, (10, 2914))

    # the target is an array of person integer ids
    assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])

    # names of the persons can be found using the target_names array
    expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
    assert_array_equal(lfw_people.target_names, expected_classes)

    # It is possible to ask for the original data without any cropping or
    # color conversion and no limit on the number of pictures per person
    lfw_people = load_lfw_people(data_home=SCIKIT_LEARN_DATA,
                                 resize=None, slice_=None, color=True)
    assert_equal(lfw_people.images.shape, (17, 250, 250, 3))

    # the ids and class names are the same as previously
    assert_array_equal(lfw_people.target,
                       [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
    assert_array_equal(lfw_people.target_names,
                       ['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
                        'Chen Dupont', 'John Lee', 'Lin Bauman',
                        'Onur Lopez'])


@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
    load_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100)


@raises(IOError)
def test_load_empty_lfw_pairs():
    load_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA)


def test_load_fake_lfw_pairs():
    lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA)

    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))

    # the target is whether the person is the same or not
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])

    # names of the persons can be found using the target_names array
    expected_classes = ['Different persons', 'Same person']
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)

    # It is possible to ask for the original data without any cropping or
    # color conversion
    lfw_pairs_train = load_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
                                     resize=None, slice_=None, color=True)
    assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))

    # the ids and class names are the same as previously
    assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    assert_array_equal(lfw_pairs_train.target_names, expected_classes)
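

# Illustrative usage sketch (not part of this test module): fetching the real
# LFW data that the fixtures above imitate. This requires a network download
# on first use; fetch_lfw_people is the loader counterpart in
# sklearn.datasets, and the parameter values below are arbitrary.
def _real_lfw_example():
    from sklearn.datasets import fetch_lfw_people
    lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
    # images: (n_samples, h, w) gray-level faces; target: integer person ids
    return lfw_people.images.shape, lfw_people.target_names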
bsd-3-clause
elkingtonmcb/scikit-learn
sklearn/feature_selection/tests/test_feature_select.py
103
22297
""" Todo: cross-check the F-value with stats model """ from __future__ import division import itertools import warnings import numpy as np from scipy import stats, sparse from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_not_in from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns_message from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_greater_equal from sklearn.utils import safe_mask from sklearn.datasets.samples_generator import (make_classification, make_regression) from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression, SelectPercentile, SelectKBest, SelectFpr, SelectFdr, SelectFwe, GenericUnivariateSelect) ############################################################################## # Test the score functions def test_f_oneway_vs_scipy_stats(): # Test that our f_oneway gives the same result as scipy.stats rng = np.random.RandomState(0) X1 = rng.randn(10, 3) X2 = 1 + rng.randn(10, 3) f, pv = stats.f_oneway(X1, X2) f2, pv2 = f_oneway(X1, X2) assert_true(np.allclose(f, f2)) assert_true(np.allclose(pv, pv2)) def test_f_oneway_ints(): # Smoke test f_oneway on integers: that it does raise casting errors # with recent numpys rng = np.random.RandomState(0) X = rng.randint(10, size=(10, 10)) y = np.arange(10) fint, pint = f_oneway(X, y) # test that is gives the same result as with float f, p = f_oneway(X.astype(np.float), y) assert_array_almost_equal(f, fint, decimal=4) assert_array_almost_equal(p, pint, decimal=4) def test_f_classif(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression(): # Test whether the F test yields meaningful results # on a simple simulated regression problem X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) F, pv = f_regression(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) # again without centering, compare with sparse F, pv = f_regression(X, y, center=False) F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False) assert_array_almost_equal(F_sparse, F) assert_array_almost_equal(pv_sparse, pv) def test_f_regression_input_dtype(): # Test whether f_regression returns the same value # for any numeric data_type rng = np.random.RandomState(0) X = rng.rand(10, 20) y = np.arange(10).astype(np.int) F1, pv1 = f_regression(X, y) F2, pv2 = f_regression(X, y.astype(np.float)) assert_array_almost_equal(F1, F2, 5) 
assert_array_almost_equal(pv1, pv2, 5) def test_f_regression_center(): # Test whether f_regression preserves dof according to 'center' argument # We use two centered variates so we have a simple relationship between # F-score with variates centering and F-score without variates centering. # Create toy example X = np.arange(-5, 6).reshape(-1, 1) # X has zero mean n_samples = X.size Y = np.ones(n_samples) Y[::2] *= -1. Y[0] = 0. # have Y mean being null F1, _ = f_regression(X, Y, center=True) F2, _ = f_regression(X, Y, center=False) assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2) assert_almost_equal(F2[0], 0.232558139) # value from statsmodels OLS def test_f_classif_multi_class(): # Test whether the F test yields meaningful results # on a simple simulated classification problem X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) F, pv = f_classif(X, y) assert_true((F > 0).all()) assert_true((pv > 0).all()) assert_true((pv < 1).all()) assert_true((pv[:5] < 0.05).all()) assert_true((pv[5:] > 1.e-4).all()) def test_select_percentile_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_percentile_classif_sparse(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the percentile heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) X = sparse.csr_matrix(X) univariate_filter = SelectPercentile(f_classif, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect(f_classif, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r.toarray(), X_r2.toarray()) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_r2inv = univariate_filter.inverse_transform(X_r2) assert_true(sparse.issparse(X_r2inv)) support_mask = safe_mask(X_r2inv, support) assert_equal(X_r2inv.shape, X.shape) assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray()) # Check other columns are empty assert_equal(X_r2inv.getnnz(), X_r.getnnz()) ############################################################################## # Test univariate selection in classification settings def test_select_kbest_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the k best heuristic X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = 
SelectKBest(f_classif, k=5) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_classif, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_kbest_all(): # Test whether k="all" correctly returns all features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k='all') X_r = univariate_filter.fit(X, y).transform(X) assert_array_equal(X, X_r) def test_select_kbest_zero(): # Test whether k=0 correctly returns no features. X, y = make_classification(n_samples=20, n_features=10, shuffle=False, random_state=0) univariate_filter = SelectKBest(f_classif, k=0) univariate_filter.fit(X, y) support = univariate_filter.get_support() gtruth = np.zeros(10, dtype=bool) assert_array_equal(support, gtruth) X_selected = assert_warns_message(UserWarning, 'No features were selected', univariate_filter.transform, X) assert_equal(X_selected.shape, (20, 0)) def test_select_heuristics_classif(): # Test whether the relative univariate feature selection # gets the correct items in a simple classification problem # with the fdr, fwe and fpr heuristics X, y = make_classification(n_samples=200, n_features=20, n_informative=3, n_redundant=2, n_repeated=0, n_classes=8, n_clusters_per_class=1, flip_y=0.0, class_sep=10, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_classif, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_classif, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_almost_equal(support, gtruth) ############################################################################## # Test univariate selection in regression settings def assert_best_scores_kept(score_filter): scores = score_filter.scores_ support = score_filter.get_support() assert_array_equal(np.sort(scores[support]), np.sort(scores)[-support.sum():]) def test_select_percentile_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the percentile heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=25) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=25).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) X_2 = X.copy() X_2[:, np.logical_not(support)] = 0 assert_array_equal(X_2, univariate_filter.inverse_transform(X_r)) # Check inverse_transform respects dtype assert_array_equal(X_2.astype(bool), univariate_filter.inverse_transform(X_r.astype(bool))) def test_select_percentile_regression_full(): # Test whether the relative univariate feature selection # selects all features when '100%' is asked. 
X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectPercentile(f_regression, percentile=100) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='percentile', param=100).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.ones(20) assert_array_equal(support, gtruth) def test_invalid_percentile(): X, y = make_regression(n_samples=10, n_features=20, n_informative=2, shuffle=False, random_state=0) assert_raises(ValueError, SelectPercentile(percentile=-1).fit, X, y) assert_raises(ValueError, SelectPercentile(percentile=101).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='percentile', param=101).fit, X, y) def test_select_kbest_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the k best heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectKBest(f_regression, k=5) X_r = univariate_filter.fit(X, y).transform(X) assert_best_scores_kept(univariate_filter) X_r2 = GenericUnivariateSelect( f_regression, mode='k_best', param=5).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support, gtruth) def test_select_heuristics_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fpr, fdr or fwe heuristics X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0, noise=10) univariate_filter = SelectFpr(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) gtruth = np.zeros(20) gtruth[:5] = 1 for mode in ['fdr', 'fpr', 'fwe']: X_r2 = GenericUnivariateSelect( f_regression, mode=mode, param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 3) def test_select_fdr_regression(): # Test that fdr heuristic actually has low FDR. def single_fdr(alpha, n_informative, random_state): X, y = make_regression(n_samples=150, n_features=20, n_informative=n_informative, shuffle=False, random_state=random_state, noise=10) with warnings.catch_warnings(record=True): # Warnings can be raised when no features are selected # (low alpha or very noisy data) univariate_filter = SelectFdr(f_regression, alpha=alpha) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fdr', param=alpha).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() num_false_positives = np.sum(support[n_informative:] == 1) num_true_positives = np.sum(support[:n_informative] == 1) if num_false_positives == 0: return 0. 
false_discovery_rate = (num_false_positives / (num_true_positives + num_false_positives)) return false_discovery_rate for alpha in [0.001, 0.01, 0.1]: for n_informative in [1, 5, 10]: # As per Benjamini-Hochberg, the expected false discovery rate # should be lower than alpha: # FDR = E(FP / (TP + FP)) <= alpha false_discovery_rate = np.mean([single_fdr(alpha, n_informative, random_state) for random_state in range(30)]) assert_greater_equal(alpha, false_discovery_rate) # Make sure that the empirical false discovery rate increases # with alpha: if false_discovery_rate != 0: assert_greater(false_discovery_rate, alpha / 10) def test_select_fwe_regression(): # Test whether the relative univariate feature selection # gets the correct items in a simple regression problem # with the fwe heuristic X, y = make_regression(n_samples=200, n_features=20, n_informative=5, shuffle=False, random_state=0) univariate_filter = SelectFwe(f_regression, alpha=0.01) X_r = univariate_filter.fit(X, y).transform(X) X_r2 = GenericUnivariateSelect( f_regression, mode='fwe', param=0.01).fit(X, y).transform(X) assert_array_equal(X_r, X_r2) support = univariate_filter.get_support() gtruth = np.zeros(20) gtruth[:5] = 1 assert_array_equal(support[:5], np.ones((5, ), dtype=np.bool)) assert_less(np.sum(support[5:] == 1), 2) def test_selectkbest_tiebreaking(): # Test whether SelectKBest actually selects k features in case of ties. # Prior to 0.11, SelectKBest would return more features than requested. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectKBest(dummy_score, k=1) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectKBest(dummy_score, k=2) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_selectpercentile_tiebreaking(): # Test if SelectPercentile selects the right n_features in case of ties. Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]] y = [1] dummy_score = lambda X, y: (X[0], X[0]) for X in Xs: sel = SelectPercentile(dummy_score, percentile=34) X1 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X1.shape[1], 1) assert_best_scores_kept(sel) sel = SelectPercentile(dummy_score, percentile=67) X2 = ignore_warnings(sel.fit_transform)([X], y) assert_equal(X2.shape[1], 2) assert_best_scores_kept(sel) def test_tied_pvalues(): # Test whether k-best and percentiles work with tied pvalues from chi2. # chi2 will return the same p-values for the following features, but it # will return different scores. X0 = np.array([[10000, 9999, 9998], [1, 1, 1]]) y = [0, 1] for perm in itertools.permutations((0, 1, 2)): X = X0[:, perm] Xt = SelectKBest(chi2, k=2).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y) assert_equal(Xt.shape, (2, 2)) assert_not_in(9998, Xt) def test_tied_scores(): # Test for stable sorting in k-best with tied scores. X_train = np.array([[0, 0, 0], [1, 1, 1]]) y_train = [0, 1] for n_features in [1, 2, 3]: sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train) X_test = sel.transform([[0, 1, 2]]) assert_array_equal(X_test[0], np.arange(3)[-n_features:]) def test_nans(): # Assert that SelectKBest and SelectPercentile can handle NaNs. # First feature has zero variance to confuse f_classif (ANOVA) and # make it return a NaN. 
X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for select in (SelectKBest(f_classif, 2), SelectPercentile(f_classif, percentile=67)): ignore_warnings(select.fit)(X, y) assert_array_equal(select.get_support(indices=True), np.array([1, 2])) def test_score_func_error(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe, SelectFdr, SelectFpr, GenericUnivariateSelect]: assert_raises(TypeError, SelectFeatures(score_func=10).fit, X, y) def test_invalid_k(): X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]] y = [1, 0, 1] assert_raises(ValueError, SelectKBest(k=-1).fit, X, y) assert_raises(ValueError, SelectKBest(k=4).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=-1).fit, X, y) assert_raises(ValueError, GenericUnivariateSelect(mode='k_best', param=4).fit, X, y) def test_f_classif_constant_feature(): # Test that f_classif warns if a feature is constant throughout. X, y = make_classification(n_samples=10, n_features=5) X[:, 0] = 2.0 assert_warns(UserWarning, f_classif, X, y) def test_no_feature_selected(): rng = np.random.RandomState(0) # Generate random uncorrelated data: a strict univariate test should # reject all the features X = rng.rand(40, 10) y = rng.randint(0, 4, size=40) strict_selectors = [ SelectFwe(alpha=0.01).fit(X, y), SelectFdr(alpha=0.01).fit(X, y), SelectFpr(alpha=0.01).fit(X, y), SelectPercentile(percentile=0).fit(X, y), SelectKBest(k=0).fit(X, y), ] for selector in strict_selectors: assert_array_equal(selector.get_support(), np.zeros(10)) X_selected = assert_warns_message( UserWarning, 'No features were selected', selector.transform, X) assert_equal(X_selected.shape, (40, 0))
bsd-3-clause
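A minimal usage sketch of the univariate selection API exercised by the tests above; the dataset parameters mirror the tests, and the printed results follow their assertions.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import SelectKBest, f_classif

# Same synthetic problem as in the tests: only the first 5 features matter.
X, y = make_classification(n_samples=200, n_features=20, n_informative=3,
                           n_redundant=2, n_repeated=0, n_classes=8,
                           n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                           shuffle=False, random_state=0)
selector = SelectKBest(f_classif, k=5)
X_reduced = selector.fit_transform(X, y)
print(X_reduced.shape)         # (200, 5)
print(selector.get_support())  # True for the first 5 columns, as the tests assert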
scikit-learn-contrib/py-earth
examples/plot_feature_importance.py
3
2142
""" =========================== Plotting feature importance =========================== A simple example showing how to compute and display feature importances, it is also compared with the feature importances obtained using random forests. Feature importance is a measure of the effect of the features on the outputs. For each feature, the values go from 0 to 1 where a higher the value means that the feature will have a higher effect on the outputs. Currently three criteria are supported : 'gcv', 'rss' and 'nb_subsets'. See [1], section 12.3 for more information about the criteria. .. [1] http://www.milbo.org/doc/earth-notes.pdf """ import numpy import matplotlib.pyplot as plt from sklearn.ensemble import RandomForestRegressor from pyearth import Earth # Create some fake data numpy.random.seed(2) m = 10000 n = 10 X = numpy.random.uniform(size=(m, n)) y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4] + numpy.random.uniform(size=m)) # Fit an Earth model criteria = ('rss', 'gcv', 'nb_subsets') model = Earth(max_degree=3, max_terms=10, minspan_alpha=.5, feature_importance_type=criteria, verbose=True) model.fit(X, y) rf = RandomForestRegressor() rf.fit(X, y) # Print the model print(model.trace()) print(model.summary()) print(model.summary_feature_importances(sort_by='gcv')) # Plot the feature importances importances = model.feature_importances_ importances['random_forest'] = rf.feature_importances_ criteria = criteria + ('random_forest',) idx = 1 fig = plt.figure(figsize=(20, 10)) labels = ['$x_{}$'.format(i) for i in range(n)] for crit in criteria: plt.subplot(2, 2, idx) plt.bar(numpy.arange(len(labels)), importances[crit], align='center', color='red') plt.xticks(numpy.arange(len(labels)), labels) plt.title(crit) plt.ylabel('importances') idx += 1 title = '$x_0,...x_9 \sim \mathcal{N}(0, 1)$\n$y= 10sin(\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + Unif(0, 1)$' fig.suptitle(title, fontsize="x-large") plt.show()
bsd-3-clause
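A hedged, minimal sketch of the same py-earth importance API with a single criterion instead of a tuple; the data here is illustrative, and the behaviour of feature_importances_ for a string-valued criterion (a plain array rather than the dict used above) is an assumption worth checking against the py-earth docs.

import numpy
from pyearth import Earth

numpy.random.seed(0)
X = numpy.random.uniform(size=(1000, 5))
y = 10 * X[:, 0] + numpy.random.uniform(size=1000)  # only x0 matters

model = Earth(max_degree=1, feature_importance_type='gcv')
model.fit(X, y)
# One importance value per input column; x0 should dominate.
print(model.feature_importances_)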
hugobowne/scikit-learn
sklearn/metrics/tests/test_regression.py
272
6066
from __future__ import division, print_function import numpy as np from itertools import product from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.metrics import explained_variance_score from sklearn.metrics import mean_absolute_error from sklearn.metrics import mean_squared_error from sklearn.metrics import median_absolute_error from sklearn.metrics import r2_score from sklearn.metrics.regression import _check_reg_targets def test_regression_metrics(n_samples=50): y_true = np.arange(n_samples) y_pred = y_true + 1 assert_almost_equal(mean_squared_error(y_true, y_pred), 1.) assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.) assert_almost_equal(median_absolute_error(y_true, y_pred), 1.) assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2) assert_almost_equal(explained_variance_score(y_true, y_pred), 1.) def test_multioutput_regression(): y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]]) y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]]) error = mean_squared_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) # mean_absolute_error and mean_squared_error are equal because # it is a binary problem. error = mean_absolute_error(y_true, y_pred) assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.) error = r2_score(y_true, y_pred, multioutput='variance_weighted') assert_almost_equal(error, 1. - 5. / 2) error = r2_score(y_true, y_pred, multioutput='uniform_average') assert_almost_equal(error, -.875) def test_regression_metrics_at_limits(): assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2) assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2) assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2) assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2) assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2) def test__check_reg_targets(): # All of length 3 EXAMPLES = [ ("continuous", [1, 2, 3], 1), ("continuous", [[1], [2], [3]], 1), ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2), ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2), ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3), ] for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES, repeat=2): if type1 == type2 and n_out1 == n_out2: y_type, y_check1, y_check2, multioutput = _check_reg_targets( y1, y2, None) assert_equal(type1, y_type) if type1 == 'continuous': assert_array_equal(y_check1, np.reshape(y1, (-1, 1))) assert_array_equal(y_check2, np.reshape(y2, (-1, 1))) else: assert_array_equal(y_check1, y1) assert_array_equal(y_check2, y2) else: assert_raises(ValueError, _check_reg_targets, y1, y2, None) def test_regression_multioutput_array(): y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] mse = mean_squared_error(y_true, y_pred, multioutput='raw_values') mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values') r = r2_score(y_true, y_pred, multioutput='raw_values') evs = explained_variance_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2) assert_array_almost_equal(mae, [0.25, 0.625], decimal=2) assert_array_almost_equal(r, [0.95, 0.93], decimal=2) assert_array_almost_equal(evs, [0.95, 0.93], decimal=2) # mean_absolute_error and mean_squared_error are equal because # it 
is a binary problem. y_true = [[0, 0]]*4 y_pred = [[1, 1]]*4 mse = mean_squared_error(y_true, y_pred, multioutput='raw_values') mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values') r = r2_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(mse, [1., 1.], decimal=2) assert_array_almost_equal(mae, [1., 1.], decimal=2) assert_array_almost_equal(r, [0., 0.], decimal=2) r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values') assert_array_almost_equal(r, [0, -3.5], decimal=2) assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='uniform_average')) evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values') assert_array_almost_equal(evs, [0, -1.25], decimal=2) # Checking for the condition in which both numerator and denominator is # zero. y_true = [[1, 3], [-1, 2]] y_pred = [[1, 4], [-1, 1]] r2 = r2_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(r2, [1., -3.], decimal=2) assert_equal(np.mean(r2), r2_score(y_true, y_pred, multioutput='uniform_average')) evs = explained_variance_score(y_true, y_pred, multioutput='raw_values') assert_array_almost_equal(evs, [1., -3.], decimal=2) assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred)) def test_regression_custom_weights(): y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]] y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]] msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6]) maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6]) rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6]) evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6]) assert_almost_equal(msew, 0.39, decimal=2) assert_almost_equal(maew, 0.475, decimal=3) assert_almost_equal(rw, 0.94, decimal=2) assert_almost_equal(evsw, 0.94, decimal=2)
bsd-3-clause
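A short sketch of the multioutput behaviour these tests pin down, using the same toy targets; the printed values match the assertions above.

from sklearn.metrics import mean_squared_error, r2_score

y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

# Per-output scores, then a single custom-weighted aggregate.
print(mean_squared_error(y_true, y_pred, multioutput='raw_values'))  # ~[0.125, 0.5625]
print(r2_score(y_true, y_pred, multioutput=[0.4, 0.6]))              # ~0.94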
Clyde-fare/scikit-learn
sklearn/metrics/ranking.py
79
25426
"""Metrics to assess performance on classification task given scores Functions named as ``*_score`` return a scalar value to maximize: the higher the better Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Arnaud Joly <a.joly@ulg.ac.be> # Jochen Wersdorfer <jochen@wersdoerfer.de> # Lars Buitinck <L.J.Buitinck@uva.nl> # Joel Nothman <joel.nothman@gmail.com> # Noel Dawe <noel@dawe.me> # License: BSD 3 clause from __future__ import division import warnings import numpy as np from scipy.sparse import csr_matrix from ..utils import check_consistent_length from ..utils import column_or_1d, check_array from ..utils.multiclass import type_of_target from ..utils.fixes import isclose from ..utils.fixes import bincount from ..utils.stats import rankdata from ..utils.sparsefuncs import count_nonzero from .base import _average_binary_score from .base import UndefinedMetricWarning def auc(x, y, reorder=False): """Compute Area Under the Curve (AUC) using the trapezoidal rule This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. Parameters ---------- x : array, shape = [n] x coordinates. y : array, shape = [n] y coordinates. reorder : boolean, optional (default=False) If True, assume that the curve is ascending in the case of ties, as for an ROC curve. If the curve is non-ascending, the result will be wrong. Returns ------- auc : float Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 See also -------- roc_auc_score : Computes the area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError('At least 2 points are needed to compute' ' area under curve, but x.shape = %s' % x.shape) direction = 1 if reorder: # reorder the data points according to the x axis and using y to # break ties order = np.lexsort((y, x)) x, y = x[order], y[order] else: dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("Reordering is not turned on, and " "the x array is not increasing: %s" % x) area = direction * np.trapz(y, x) return area def average_precision_score(y_true, y_score, average="macro", sample_weight=None): """Compute average precision (AP) from prediction scores This score corresponds to the area under the precision-recall curve. Note: this implementation is restricted to the binary classification task or multilabel classification task. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. 
Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- average_precision : float References ---------- .. [1] `Wikipedia entry for the Average precision <http://en.wikipedia.org/wiki/Average_precision>`_ See also -------- roc_auc_score : Area under the ROC curve precision_recall_curve : Compute precision-recall pairs for different probability thresholds Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS 0.79... """ def _binary_average_precision(y_true, y_score, sample_weight=None): precision, recall, thresholds = precision_recall_curve( y_true, y_score, sample_weight=sample_weight) return auc(recall, precision) return _average_binary_score(_binary_average_precision, y_true, y_score, average, sample_weight=sample_weight) def roc_auc_score(y_true, y_score, average="macro", sample_weight=None): """Compute Area Under the Curve (AUC) from prediction scores Note: this implementation is restricted to the binary classification task or multilabel classification task in label indicator format. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] or [n_samples, n_classes] True binary labels in binary label indicators. y_score : array, shape = [n_samples] or [n_samples, n_classes] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted'] If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- auc : float References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ See also -------- average_precision_score : Area under the precision-recall curve roc_curve : Compute Receiver operating characteristic (ROC) Examples -------- >>> import numpy as np >>> from sklearn.metrics import roc_auc_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> roc_auc_score(y_true, y_scores) 0.75 """ def _binary_roc_auc_score(y_true, y_score, sample_weight=None): if len(np.unique(y_true)) != 2: raise ValueError("Only one class present in y_true. 
ROC AUC score " "is not defined in that case.") fpr, tpr, tresholds = roc_curve(y_true, y_score, sample_weight=sample_weight) return auc(fpr, tpr, reorder=True) return _average_binary_score( _binary_roc_auc_score, y_true, y_score, average, sample_weight=sample_weight) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification y_score : array, shape = [n_samples] Estimated probabilities or decision function pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fps : array, shape = [n_thresholds] A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : array, shape = [n_thresholds <= len(np.unique(y_score))] An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : array, shape = [n_thresholds] Decreasing score values. """ check_consistent_length(y_true, y_score) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) if sample_weight is not None: sample_weight = column_or_1d(sample_weight) # ensure binary classification if pos_label is not specified classes = np.unique(y_true) if (pos_label is None and not (np.all(classes == [0, 1]) or np.all(classes == [-1, 1]) or np.all(classes == [0]) or np.all(classes == [-1]) or np.all(classes == [1]))): raise ValueError("Data is not binary and pos_label is not specified") elif pos_label is None: pos_label = 1. # make y_true a boolean vector y_true = (y_true == pos_label) # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] if sample_weight is not None: weight = sample_weight[desc_score_indices] else: weight = 1. # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. # We need to use isclose to avoid spurious repeated thresholds # stemming from floating point roundoff errors. distinct_value_indices = np.where(np.logical_not(isclose( np.diff(y_score), 0)))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = (y_true * weight).cumsum()[threshold_idxs] if sample_weight is not None: fps = weight.cumsum()[threshold_idxs] - tps else: fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs] def precision_recall_curve(y_true, probas_pred, pos_label=None, sample_weight=None): """Compute precision-recall pairs for different probability thresholds Note: this implementation is restricted to the binary classification task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. 
The recall is intuitively the ability of the classifier to find all the positive samples. The last precision and recall values are 1. and 0. respectively and do not have a corresponding threshold. This ensures that the graph starts on the x axis. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True targets of binary classification in range {-1, 1} or {0, 1}. probas_pred : array, shape = [n_samples] Estimated probabilities or decision function. pos_label : int, optional (default=None) The label of the positive class sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- precision : array, shape = [n_thresholds + 1] Precision values such that element i is the precision of predictions with score >= thresholds[i] and the last element is 1. recall : array, shape = [n_thresholds + 1] Decreasing recall values such that element i is the recall of predictions with score >= thresholds[i] and the last element is 0. thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))] Increasing thresholds on the decision function used to compute precision and recall. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> precision, recall, thresholds = precision_recall_curve( ... y_true, y_scores) >>> precision # doctest: +ELLIPSIS array([ 0.66..., 0.5 , 1. , 1. ]) >>> recall array([ 1. , 0.5, 0.5, 0. ]) >>> thresholds array([ 0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight) precision = tps / (tps + fps) recall = tps / tps[-1] # stop when full recall attained # and reverse the outputs so recall is decreasing last_ind = tps.searchsorted(tps[-1]) sl = slice(last_ind, None, -1) return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl] def roc_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute Receiver operating characteristic (ROC) Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array, shape = [n_samples] True binary labels in range {0, 1} or {-1, 1}. If labels are not binary, pos_label should be explicitly given. y_score : array, shape = [n_samples] Target scores, can either be probability estimates of the positive class or confidence values. pos_label : int Label considered as positive and others are considered negative. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- fpr : array, shape = [>2] Increasing false positive rates such that element i is the false positive rate of predictions with score >= thresholds[i]. tpr : array, shape = [>2] Increasing true positive rates such that element i is the true positive rate of predictions with score >= thresholds[i]. thresholds : array, shape = [n_thresholds] Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `max(y_score) + 1`. See also -------- roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. 
References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([ 0. , 0.5, 0.5, 1. ]) >>> tpr array([ 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight) if tps.size == 0 or fps[0] != 0: # Add an extra threshold position if necessary tps = np.r_[0, tps] fps = np.r_[0, fps] thresholds = np.r_[thresholds[0] + 1, thresholds] if fps[-1] <= 0: warnings.warn("No negative samples in y_true, " "false positive value should be meaningless", UndefinedMetricWarning) fpr = np.repeat(np.nan, fps.shape) else: fpr = fps / fps[-1] if tps[-1] <= 0: warnings.warn("No positive samples in y_true, " "true positive value should be meaningless", UndefinedMetricWarning) tpr = np.repeat(np.nan, tps.shape) else: tpr = tps / tps[-1] return fpr, tpr, thresholds def label_ranking_average_precision_score(y_true, y_score): """Compute ranking-based average precision Label ranking average precision (LRAP) is the average over each ground truth label assigned to each sample, of the ratio of true vs. total labels with lower score. This metric is used in multilabel ranking problem, where the goal is to give better rank to the labels associated to each sample. The obtained score is always strictly greater than 0 and the best value is 1. Read more in the :ref:`User Guide <label_ranking_average_precision>`. Parameters ---------- y_true : array or sparse matrix, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. Returns ------- score : float Examples -------- >>> import numpy as np >>> from sklearn.metrics import label_ranking_average_precision_score >>> y_true = np.array([[1, 0, 0], [0, 0, 1]]) >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]]) >>> label_ranking_average_precision_score(y_true, y_score) \ # doctest: +ELLIPSIS 0.416... """ check_consistent_length(y_true, y_score) y_true = check_array(y_true, ensure_2d=False) y_score = check_array(y_score, ensure_2d=False) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") # Handle badly formated array and the degenerate case with one label y_type = type_of_target(y_true) if (y_type != "multilabel-indicator" and not (y_type == "binary" and y_true.ndim == 2)): raise ValueError("{0} format is not supported".format(y_type)) y_true = csr_matrix(y_true) y_score = -y_score n_samples, n_labels = y_true.shape out = 0. for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): relevant = y_true.indices[start:stop] if (relevant.size == 0 or relevant.size == n_labels): # If all labels are relevant or unrelevant, the score is also # equal to 1. The label ranking has no meaning. out += 1. 
continue scores_i = y_score[i] rank = rankdata(scores_i, 'max')[relevant] L = rankdata(scores_i[relevant], 'max') out += (L / rank).mean() return out / n_samples def coverage_error(y_true, y_score, sample_weight=None): """Coverage error measure Compute how far we need to go through the ranked scores to cover all true labels. The best value is equal to the average number of labels in ``y_true`` per sample. Ties in ``y_scores`` are broken by giving maximal rank that would have been assigned to all tied values. Read more in the :ref:`User Guide <coverage_error>`. Parameters ---------- y_true : array, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- coverage_error : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. """ y_true = check_array(y_true, ensure_2d=False) y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type != "multilabel-indicator": raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true)) y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1)) coverage = (y_score >= y_min_relevant).sum(axis=1) coverage = coverage.filled(0) return np.average(coverage, weights=sample_weight) def label_ranking_loss(y_true, y_score, sample_weight=None): """Compute Ranking loss measure Compute the average number of label pairs that are incorrectly ordered given y_score weighted by the size of the label set and the number of labels not in the label set. This is similar to the error set size, but weighted by the number of relevant and irrelevant labels. The best performance is achieved with a ranking loss of zero. Read more in the :ref:`User Guide <label_ranking_loss>`. Parameters ---------- y_true : array or sparse matrix, shape = [n_samples, n_labels] True binary labels in binary indicator format. y_score : array, shape = [n_samples, n_labels] Target scores, can either be probability estimates of the positive class, confidence values, or binary decisions. sample_weight : array-like of shape = [n_samples], optional Sample weights. Returns ------- loss : float References ---------- .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010). Mining multi-label data. In Data mining and knowledge discovery handbook (pp. 667-685). Springer US. 
""" y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr') y_score = check_array(y_score, ensure_2d=False) check_consistent_length(y_true, y_score, sample_weight) y_type = type_of_target(y_true) if y_type not in ("multilabel-indicator",): raise ValueError("{0} format is not supported".format(y_type)) if y_true.shape != y_score.shape: raise ValueError("y_true and y_score have different shape") n_samples, n_labels = y_true.shape y_true = csr_matrix(y_true) loss = np.zeros(n_samples) for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])): # Sort and bin the label scores unique_scores, unique_inverse = np.unique(y_score[i], return_inverse=True) true_at_reversed_rank = bincount( unique_inverse[y_true.indices[start:stop]], minlength=len(unique_scores)) all_at_reversed_rank = bincount(unique_inverse, minlength=len(unique_scores)) false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank # if the scores are ordered, it's possible to count the number of # incorrectly ordered paires in linear time by cumulatively counting # how many false labels of a given score have a score higher than the # accumulated true labels with lower score. loss[i] = np.dot(true_at_reversed_rank.cumsum(), false_at_reversed_rank) n_positives = count_nonzero(y_true, axis=1) with np.errstate(divide="ignore", invalid="ignore"): loss /= ((n_labels - n_positives) * n_positives) # When there is no positive or no negative labels, those values should # be consider as correct, i.e. the ranking doesn't matter. loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0. return np.average(loss, weights=sample_weight)
bsd-3-clause
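A minimal sketch of how the curve helpers above compose; the inputs and the 0.75 result come straight from their docstrings.

import numpy as np
from sklearn.metrics import auc, roc_auc_score, roc_curve

y = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])

fpr, tpr, thresholds = roc_curve(y, scores)
print(auc(fpr, tpr))             # 0.75, trapezoidal area under the ROC curve
print(roc_auc_score(y, scores))  # same value via the convenience wrapper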
etkirsch/scikit-learn
examples/svm/plot_custom_kernel.py
171
1546
""" ====================== SVM with custom kernel ====================== Simple usage of Support Vector Machines to classify a sample. It will plot the decision surface and the support vectors. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset Y = iris.target def my_kernel(X, Y): """ We create a custom kernel: (2 0) k(X, Y) = X ( ) Y.T (0 1) """ M = np.array([[2, 0], [0, 1.0]]) return np.dot(np.dot(X, M), Y.T) h = .02 # step size in the mesh # we create an instance of SVM and fit out data. clf = svm.SVC(kernel=my_kernel) clf.fit(X, Y) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired) plt.title('3-Class classification using Support Vector Machine with custom' ' kernel') plt.axis('tight') plt.show()
bsd-3-clause
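A sketch checking the algebra behind the custom kernel above: for a diagonal positive matrix M, X M Y.T equals a plain dot product after scaling each feature by the square root of its diagonal entry. The arrays here are illustrative.

import numpy as np

M = np.array([[2.0, 0.0], [0.0, 1.0]])
X = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = np.array([[5.0, 6.0]])

k_direct = X.dot(M).dot(Y.T)
# Scale columns by sqrt of M's diagonal, then take the ordinary dot product.
k_scaled = (X * np.sqrt(np.diag(M))).dot((Y * np.sqrt(np.diag(M))).T)
assert np.allclose(k_direct, k_scaled)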
fengzhyuan/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
286
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
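A minimal sketch of the label-hiding pattern used above, without the plotting; reading the inferred labels from the transduction_ attribute is an assumption worth checking against the scikit-learn version in use.

import numpy as np
from sklearn import datasets
from sklearn.semi_supervised import label_propagation

rng = np.random.RandomState(0)
iris = datasets.load_iris()
y_partial = np.copy(iris.target)
y_partial[rng.rand(len(y_partial)) < 0.5] = -1  # hide roughly half the labels

model = label_propagation.LabelSpreading().fit(iris.data[:, :2], y_partial)
print(model.transduction_[:10])  # labels inferred for every sample, labeled or not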
ngoix/OCRF
sklearn/linear_model/ransac.py
14
17163
# coding: utf-8 # Author: Johannes Schönberger # # License: BSD 3 clause import numpy as np import warnings from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone from ..utils import check_random_state, check_array, check_consistent_length from ..utils.random import sample_without_replacement from ..utils.validation import check_is_fitted from .base import LinearRegression from ..utils.validation import has_fit_parameter _EPSILON = np.spacing(1) def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability): """Determine number trials such that at least one outlier-free subset is sampled for the given inlier/outlier ratio. Parameters ---------- n_inliers : int Number of inliers in the data. n_samples : int Total number of samples in the data. min_samples : int Minimum number of samples chosen randomly from original data. probability : float Probability (confidence) that one outlier-free sample is generated. Returns ------- trials : int Number of trials. """ inlier_ratio = n_inliers / float(n_samples) nom = max(_EPSILON, 1 - probability) denom = max(_EPSILON, 1 - inlier_ratio ** min_samples) if nom == 1: return 0 if denom == 1: return float('inf') return abs(float(np.ceil(np.log(nom) / np.log(denom)))) class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin): """RANSAC (RANdom SAmple Consensus) algorithm. RANSAC is an iterative algorithm for the robust estimation of parameters from a subset of inliers from the complete data set. More information can be found in the general documentation of linear models. A detailed description of the algorithm can be found in the documentation of the ``linear_model`` sub-package. Read more in the :ref:`User Guide <ransac_regression>`. Parameters ---------- base_estimator : object, optional Base estimator object which implements the following methods: * `fit(X, y)`: Fit model to given training data and target values. * `score(X, y)`: Returns the mean accuracy on the given test data, which is used for the stop criterion defined by `stop_score`. Additionally, the score is used to decide which of two equally large consensus sets is chosen as the better one. If `base_estimator` is None, then ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for target values of dtype float. Note that the current implementation only supports regression estimators. min_samples : int (>= 1) or float ([0, 1]), optional Minimum number of samples chosen randomly from original data. Treated as an absolute number of samples for `min_samples >= 1`, treated as a relative number `ceil(min_samples * X.shape[0]`) for `min_samples < 1`. This is typically chosen as the minimal number of samples necessary to estimate the given `base_estimator`. By default a ``sklearn.linear_model.LinearRegression()`` estimator is assumed and `min_samples` is chosen as ``X.shape[1] + 1``. residual_threshold : float, optional Maximum residual for a data sample to be classified as an inlier. By default the threshold is chosen as the MAD (median absolute deviation) of the target values `y`. is_data_valid : callable, optional This function is called with the randomly selected data before the model is fitted to it: `is_data_valid(X, y)`. If its return value is False the current randomly chosen sub-sample is skipped. is_model_valid : callable, optional This function is called with the estimated model and the randomly selected data: `is_model_valid(model, X, y)`. If its return value is False the current randomly chosen sub-sample is skipped. 
Rejecting samples with this function is computationally costlier than with `is_data_valid`. `is_model_valid` should therefore only be used if the estimated model is needed for making the rejection decision. max_trials : int, optional Maximum number of iterations for random sample selection. stop_n_inliers : int, optional Stop iteration if at least this number of inliers are found. stop_score : float, optional Stop iteration if score is greater equal than this threshold. stop_probability : float in range [0, 1], optional RANSAC iteration stops if at least one outlier-free set of the training data is sampled in RANSAC. This requires to generate at least N samples (iterations):: N >= log(1 - probability) / log(1 - e**m) where the probability (confidence) is typically set to high value such as 0.99 (the default) and e is the current fraction of inliers w.r.t. the total number of samples. residual_metric : callable, optional Metric to reduce the dimensionality of the residuals to 1 for multi-dimensional target values ``y.shape[1] > 1``. By default the sum of absolute differences is used:: lambda dy: np.sum(np.abs(dy), axis=1) NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20 Use ``loss`` instead. loss: string, callable, optional, default "absolute_loss" String inputs, "absolute_loss" and "squared_loss" are supported which find the absolute loss and squared loss per sample respectively. If ``loss`` is a callable, then it should be a function that takes two arrays as inputs, the true and predicted value and returns a 1-D array with the ``i``th value of the array corresponding to the loss on `X[i]`. If the loss on a sample is greater than the ``residual_threshold``, then this sample is classified as an outlier. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Attributes ---------- estimator_ : object Best fitted model (copy of the `base_estimator` object). n_trials_ : int Number of random selection trials until one of the stop criteria is met. It is always ``<= max_trials``. inlier_mask_ : bool array of shape [n_samples] Boolean mask of inliers classified as ``True``. References ---------- .. [1] http://en.wikipedia.org/wiki/RANSAC .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf """ def __init__(self, base_estimator=None, min_samples=None, residual_threshold=None, is_data_valid=None, is_model_valid=None, max_trials=100, stop_n_inliers=np.inf, stop_score=np.inf, stop_probability=0.99, residual_metric=None, loss='absolute_loss', random_state=None): self.base_estimator = base_estimator self.min_samples = min_samples self.residual_threshold = residual_threshold self.is_data_valid = is_data_valid self.is_model_valid = is_model_valid self.max_trials = max_trials self.stop_n_inliers = stop_n_inliers self.stop_score = stop_score self.stop_probability = stop_probability self.residual_metric = residual_metric self.random_state = random_state self.loss = loss def fit(self, X, y, sample_weight=None): """Fit estimator using RANSAC algorithm. Parameters ---------- X : array-like or sparse matrix, shape [n_samples, n_features] Training data. y : array-like, shape = [n_samples] or [n_samples, n_targets] Target values. 
sample_weight: array-like, shape = [n_samples] Individual weights for each sample raises error if sample_weight is passed and base_estimator fit method does not support it. Raises ------ ValueError If no valid consensus set could be found. This occurs if `is_data_valid` and `is_model_valid` return False for all `max_trials` randomly chosen sub-samples. """ X = check_array(X, accept_sparse='csr') y = check_array(y, ensure_2d=False) check_consistent_length(X, y) if self.base_estimator is not None: base_estimator = clone(self.base_estimator) else: base_estimator = LinearRegression() if self.min_samples is None: # assume linear model by default min_samples = X.shape[1] + 1 elif 0 < self.min_samples < 1: min_samples = np.ceil(self.min_samples * X.shape[0]) elif self.min_samples >= 1: if self.min_samples % 1 != 0: raise ValueError("Absolute number of samples must be an " "integer value.") min_samples = self.min_samples else: raise ValueError("Value for `min_samples` must be scalar and " "positive.") if min_samples > X.shape[0]: raise ValueError("`min_samples` may not be larger than number " "of samples ``X.shape[0]``.") if self.stop_probability < 0 or self.stop_probability > 1: raise ValueError("`stop_probability` must be in range [0, 1].") if self.residual_threshold is None: # MAD (median absolute deviation) residual_threshold = np.median(np.abs(y - np.median(y))) else: residual_threshold = self.residual_threshold if self.residual_metric is not None: warnings.warn( "'residual_metric' will be removed in version 0.20. Use " "'loss' instead.", DeprecationWarning) if self.loss == "absolute_loss": if y.ndim == 1: loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred) else: loss_function = lambda \ y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1) elif self.loss == "squared_loss": if y.ndim == 1: loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2 else: loss_function = lambda \ y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1) elif callable(self.loss): loss_function = self.loss else: raise ValueError( "loss should be 'absolute_loss', 'squared_loss' or a callable." "Got %s. " % self.loss) random_state = check_random_state(self.random_state) try: # Not all estimator accept a random_state base_estimator.set_params(random_state=random_state) except ValueError: pass estimator_fit_has_sample_weight = has_fit_parameter(base_estimator, "sample_weight") estimator_name = type(base_estimator).__name__ if (sample_weight is not None and not estimator_fit_has_sample_weight): raise ValueError("%s does not support sample_weight. Samples" " weights are only used for the calibration" " itself." 
% estimator_name) if sample_weight is not None: sample_weight = np.asarray(sample_weight) n_inliers_best = 0 score_best = np.inf inlier_mask_best = None X_inlier_best = None y_inlier_best = None # number of data samples n_samples = X.shape[0] sample_idxs = np.arange(n_samples) n_samples, _ = X.shape for self.n_trials_ in range(1, self.max_trials + 1): # choose random sample set subset_idxs = sample_without_replacement(n_samples, min_samples, random_state=random_state) X_subset = X[subset_idxs] y_subset = y[subset_idxs] # check if random sample set is valid if (self.is_data_valid is not None and not self.is_data_valid(X_subset, y_subset)): continue # fit model for current random sample set if sample_weight is None: base_estimator.fit(X_subset, y_subset) else: base_estimator.fit(X_subset, y_subset, sample_weight=sample_weight[subset_idxs]) # check if estimated model is valid if (self.is_model_valid is not None and not self.is_model_valid(base_estimator, X_subset, y_subset)): continue # residuals of all data for current random sample model y_pred = base_estimator.predict(X) # XXX: Deprecation: Remove this if block in 0.20 if self.residual_metric is not None: diff = y_pred - y if diff.ndim == 1: diff = diff.reshape(-1, 1) residuals_subset = self.residual_metric(diff) else: residuals_subset = loss_function(y, y_pred) # classify data into inliers and outliers inlier_mask_subset = residuals_subset < residual_threshold n_inliers_subset = np.sum(inlier_mask_subset) # less inliers -> skip current random sample if n_inliers_subset < n_inliers_best: continue if n_inliers_subset == 0: raise ValueError("No inliers found, possible cause is " "setting residual_threshold ({0}) too low.".format( self.residual_threshold)) # extract inlier data set inlier_idxs_subset = sample_idxs[inlier_mask_subset] X_inlier_subset = X[inlier_idxs_subset] y_inlier_subset = y[inlier_idxs_subset] # score of inlier data set score_subset = base_estimator.score(X_inlier_subset, y_inlier_subset) # same number of inliers but worse score -> skip current random # sample if (n_inliers_subset == n_inliers_best and score_subset < score_best): continue # save current random sample as best sample n_inliers_best = n_inliers_subset score_best = score_subset inlier_mask_best = inlier_mask_subset X_inlier_best = X_inlier_subset y_inlier_best = y_inlier_subset # break if sufficient number of inliers or score is reached if (n_inliers_best >= self.stop_n_inliers or score_best >= self.stop_score or self.n_trials_ >= _dynamic_max_trials(n_inliers_best, n_samples, min_samples, self.stop_probability)): break # if none of the iterations met the required criteria if inlier_mask_best is None: raise ValueError( "RANSAC could not find valid consensus set, because" " either the `residual_threshold` rejected all the samples or" " `is_data_valid` and `is_model_valid` returned False for all" " `max_trials` randomly ""chosen sub-samples. Consider " "relaxing the ""constraints.") # estimate final model using all inliers base_estimator.fit(X_inlier_best, y_inlier_best) self.estimator_ = base_estimator self.inlier_mask_ = inlier_mask_best return self def predict(self, X): """Predict using the estimated model. This is a wrapper for `estimator_.predict(X)`. Parameters ---------- X : numpy array of shape [n_samples, n_features] Returns ------- y : array, shape = [n_samples] or [n_samples, n_targets] Returns predicted values. """ check_is_fitted(self, 'estimator_') return self.estimator_.predict(X) def score(self, X, y): """Returns the score of the prediction. 
This is a wrapper for `estimator_.score(X, y)`. Parameters ---------- X : numpy array or sparse matrix of shape [n_samples, n_features] Training data. y : array, shape = [n_samples] or [n_samples, n_targets] Target values. Returns ------- z : float Score of the prediction. """ check_is_fitted(self, 'estimator_') return self.estimator_.score(X, y)
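# A minimal usage sketch of the estimator above (assuming the surrounding
# class is the RANSACRegressor exported by sklearn.linear_model); the data,
# threshold, and outlier values are arbitrary illustration choices.
if __name__ == "__main__":
    import numpy as np

    rng = np.random.RandomState(0)
    X_demo = np.arange(100)[:, None].astype(float)
    y_demo = 0.2 * X_demo.ravel() + 1.0 + 0.05 * rng.randn(100)
    y_demo[:3] = 100.0                          # inject a few gross outliers

    ransac = RANSACRegressor(min_samples=2, residual_threshold=1.0,
                             random_state=0)
    ransac.fit(X_demo, y_demo)
    print(ransac.inlier_mask_.sum())            # the 3 outliers are excluded
    print(ransac.score(X_demo[3:], y_demo[3:]))  # R^2 on the clean points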
bsd-3-clause
shikhardb/scikit-learn
sklearn/covariance/__init__.py
389
1157
""" The :mod:`sklearn.covariance` module includes methods and algorithms to robustly estimate the covariance of features given a set of points. The precision matrix defined as the inverse of the covariance is also estimated. Covariance estimation is closely related to the theory of Gaussian Graphical Models. """ from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \ log_likelihood from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \ ledoit_wolf, ledoit_wolf_shrinkage, \ LedoitWolf, oas, OAS from .robust_covariance import fast_mcd, MinCovDet from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV from .outlier_detection import EllipticEnvelope __all__ = ['EllipticEnvelope', 'EmpiricalCovariance', 'GraphLasso', 'GraphLassoCV', 'LedoitWolf', 'MinCovDet', 'OAS', 'ShrunkCovariance', 'empirical_covariance', 'fast_mcd', 'graph_lasso', 'ledoit_wolf', 'ledoit_wolf_shrinkage', 'log_likelihood', 'oas', 'shrunk_covariance']
bsd-3-clause
yanlend/scikit-learn
examples/datasets/plot_iris_dataset.py
283
1928
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises
(Setosa, Versicolour, and Virginica); the petal and sepal
lengths are stored in a 150x4 numpy.ndarray.

The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.

The plot below uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To get a better understanding of the interaction of the dimensions,
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
bsd-3-clause
simon-pepin/scikit-learn
sklearn/semi_supervised/label_propagation.py
128
15312
# coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.

These algorithms perform very well in practice, but running them can be
very expensive, at approximately O(N^3) where N is the number of (labeled
and unlabeled) points. The theory (why they perform so well) is motivated
by intuitions from random walk algorithms and geometric relationships in the
data. For more information see the references below.

Model Features
--------------
Label clamping:
  The algorithm tries to learn distributions of labels over the dataset. In
  the "Hard Clamp" mode, the ground truth labels are never allowed to change.
  They are clamped into position. In the "Soft Clamp" mode, they are allowed
  some wiggle room, but some alpha of their original value will always be
  retained. Hard clamping is the same as soft clamping with alpha set to 1.

Kernel:
  A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
  a dense matrix of size O(N^2). The KNN kernel will generate a sparse matrix
  of size O(k*N) which will run much faster. See the documentation for SVMs
  for more info on kernels.

Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
...    size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)

Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216

[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""

# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod

from scipy import sparse
import numpy as np

from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors


### Helper functions

def _not_converged(y_truth, y_prediction, tol=1e-3):
    """basic convergence check"""
    return np.abs(y_truth - y_prediction).sum() > tol


class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
                                              ClassifierMixin)):
    """Base class for label propagation module.

    Parameters
    ----------
    kernel : {'knn', 'rbf'}
        String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float Parameter for rbf kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state n_neighbors : integer > 0 Parameter for knn kernel """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=1, max_iter=30, tol=1e-3): self.max_iter = max_iter self.tol = tol # kernel parameters self.kernel = kernel self.gamma = gamma self.n_neighbors = n_neighbors # clamping factor self.alpha = alpha def _get_kernel(self, X, y=None): if self.kernel == "rbf": if y is None: return rbf_kernel(X, X, gamma=self.gamma) else: return rbf_kernel(X, y, gamma=self.gamma) elif self.kernel == "knn": if self.nn_fit is None: self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X) if y is None: return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X, self.n_neighbors, mode='connectivity') else: return self.nn_fit.kneighbors(y, return_distance=False) else: raise ValueError("%s is not a valid kernel. Only rbf and knn" " are supported at this time" % self.kernel) @abstractmethod def _build_graph(self): raise NotImplementedError("Graph construction must be implemented" " to fit a label propagation model.") def predict(self, X): """Performs inductive inference across the model. Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- y : array_like, shape = [n_samples] Predictions for input data """ probas = self.predict_proba(X) return self.classes_[np.argmax(probas, axis=1)].ravel() def predict_proba(self, X): """Predict probability for each possible outcome. Compute the probability estimates for each single sample in X and each possible outcome seen during training (categorical distribution). Parameters ---------- X : array_like, shape = [n_samples, n_features] Returns ------- probabilities : array, shape = [n_samples, n_classes] Normalized probability distributions across class labels """ check_is_fitted(self, 'X_') if sparse.isspmatrix(X): X_2d = X else: X_2d = np.atleast_2d(X) weight_matrices = self._get_kernel(self.X_, X_2d) if self.kernel == 'knn': probabilities = [] for weight_matrix in weight_matrices: ine = np.sum(self.label_distributions_[weight_matrix], axis=0) probabilities.append(ine) probabilities = np.array(probabilities) else: weight_matrices = weight_matrices.T probabilities = np.dot(weight_matrices, self.label_distributions_) normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T probabilities /= normalizer return probabilities def fit(self, X, y): """Fit a semi-supervised label propagation model based All the input data is provided matrix X (labeled and unlabeled) and corresponding label matrix y with a dedicated marker value for unlabeled samples. Parameters ---------- X : array-like, shape = [n_samples, n_features] A {n_samples by n_samples} size matrix will be created from this y : array_like, shape = [n_samples] n_labeled_samples (unlabeled points are marked as -1) All unlabeled samples will be transductively assigned labels Returns ------- self : returns an instance of self. 
""" X, y = check_X_y(X, y) self.X_ = X # actual graph construction (implementations should override this) graph_matrix = self._build_graph() # label construction # construct a categorical distribution for classification only classes = np.unique(y) classes = (classes[classes != -1]) self.classes_ = classes n_samples, n_classes = len(y), len(classes) y = np.asarray(y) unlabeled = y == -1 clamp_weights = np.ones((n_samples, 1)) clamp_weights[unlabeled, 0] = self.alpha # initialize distributions self.label_distributions_ = np.zeros((n_samples, n_classes)) for label in classes: self.label_distributions_[y == label, classes == label] = 1 y_static = np.copy(self.label_distributions_) if self.alpha > 0.: y_static *= 1 - self.alpha y_static[unlabeled] = 0 l_previous = np.zeros((self.X_.shape[0], n_classes)) remaining_iter = self.max_iter if sparse.isspmatrix(graph_matrix): graph_matrix = graph_matrix.tocsr() while (_not_converged(self.label_distributions_, l_previous, self.tol) and remaining_iter > 1): l_previous = self.label_distributions_ self.label_distributions_ = safe_sparse_dot( graph_matrix, self.label_distributions_) # clamp self.label_distributions_ = np.multiply( clamp_weights, self.label_distributions_) + y_static remaining_iter -= 1 normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis] self.label_distributions_ /= normalizer # set the transduction item transduction = self.classes_[np.argmax(self.label_distributions_, axis=1)] self.transduction_ = transduction.ravel() self.n_iter_ = self.max_iter - remaining_iter return self class LabelPropagation(BaseLabelPropagation): """Label Propagation classifier Read more in the :ref:`User Guide <label_propagation>`. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported.. gamma : float Parameter for rbf kernel n_neighbors : integer > 0 Parameter for knn kernel alpha : float Clamping factor max_iter : float Change maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelPropagation >>> label_prop_model = LabelPropagation() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelPropagation(...) References ---------- Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data with label propagation. 
Technical Report CMU-CALD-02-107, Carnegie Mellon University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf See Also -------- LabelSpreading : Alternate label propagation strategy more robust to noise """ def _build_graph(self): """Matrix representing a fully connected graph between each sample This basic implementation creates a non-stochastic affinity matrix, so class distributions will exceed 1 (normalization may be desired). """ if self.kernel == 'knn': self.nn_fit = None affinity_matrix = self._get_kernel(self.X_) normalizer = affinity_matrix.sum(axis=0) if sparse.isspmatrix(affinity_matrix): affinity_matrix.data /= np.diag(np.array(normalizer)) else: affinity_matrix /= normalizer[:, np.newaxis] return affinity_matrix class LabelSpreading(BaseLabelPropagation): """LabelSpreading model for semi-supervised learning This model is similar to the basic Label Propgation algorithm, but uses affinity matrix based on the normalized graph Laplacian and soft clamping across the labels. Read more in the :ref:`User Guide <label_propagation>`. Parameters ---------- kernel : {'knn', 'rbf'} String identifier for kernel function to use. Only 'rbf' and 'knn' kernels are currently supported. gamma : float parameter for rbf kernel n_neighbors : integer > 0 parameter for knn kernel alpha : float clamping factor max_iter : float maximum number of iterations allowed tol : float Convergence tolerance: threshold to consider the system at steady state Attributes ---------- X_ : array, shape = [n_samples, n_features] Input array. classes_ : array, shape = [n_classes] The distinct labels used in classifying instances. label_distributions_ : array, shape = [n_samples, n_classes] Categorical distribution for each item. transduction_ : array, shape = [n_samples] Label assigned to each item via the transduction. n_iter_ : int Number of iterations run. Examples -------- >>> from sklearn import datasets >>> from sklearn.semi_supervised import LabelSpreading >>> label_prop_model = LabelSpreading() >>> iris = datasets.load_iris() >>> random_unlabeled_points = np.where(np.random.random_integers(0, 1, ... size=len(iris.target))) >>> labels = np.copy(iris.target) >>> labels[random_unlabeled_points] = -1 >>> label_prop_model.fit(iris.data, labels) ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS LabelSpreading(...) References ---------- Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston, Bernhard Schoelkopf. Learning with local and global consistency (2004) http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219 See Also -------- LabelPropagation : Unregularized graph based semi-supervised learning """ def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2, max_iter=30, tol=1e-3): # this one has different base parameters super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha, max_iter=max_iter, tol=tol) def _build_graph(self): """Graph matrix for Label Spreading computes the graph laplacian""" # compute affinity matrix (or gram matrix) if self.kernel == 'knn': self.nn_fit = None n_samples = self.X_.shape[0] affinity_matrix = self._get_kernel(self.X_) laplacian = graph_laplacian(affinity_matrix, normed=True) laplacian = -laplacian if sparse.isspmatrix(laplacian): diag_mask = (laplacian.row == laplacian.col) laplacian.data[diag_mask] = 0.0 else: laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0 return laplacian
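# A minimal usage sketch of the classes above: soft clamping (alpha < 1)
# with the sparse KNN kernel on partially labeled iris data; the unlabeled
# marker -1 follows the convention documented in `fit`.
if __name__ == "__main__":
    from sklearn import datasets

    iris = datasets.load_iris()
    labels = np.copy(iris.target)
    labels[::2] = -1                    # mark every other sample unlabeled
    model = LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)
    model.fit(iris.data, labels)
    print(model.transduction_[:10])     # labels assigned by transduction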
bsd-3-clause
macks22/scikit-learn
examples/semi_supervised/plot_label_propagation_structure.py
247
2432
""" ============================================== Label Propagation learning a complex structure ============================================== Example of LabelPropagation learning a complex internal structure to demonstrate "manifold learning". The outer circle should be labeled "red" and the inner circle "blue". Because both label groups lie inside their own distinct shape, we can see that the labels propagate correctly around the circle. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from sklearn.semi_supervised import label_propagation from sklearn.datasets import make_circles # generate ring with inner box n_samples = 200 X, y = make_circles(n_samples=n_samples, shuffle=False) outer, inner = 0, 1 labels = -np.ones(n_samples) labels[0] = outer labels[-1] = inner ############################################################################### # Learn with LabelSpreading label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0) label_spread.fit(X, labels) ############################################################################### # Plot output labels output_labels = label_spread.transduction_ plt.figure(figsize=(8.5, 4)) plt.subplot(1, 2, 1) plot_outer_labeled, = plt.plot(X[labels == outer, 0], X[labels == outer, 1], 'rs') plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.') plot_inner_labeled, = plt.plot(X[labels == inner, 0], X[labels == inner, 1], 'bs') plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled), ('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left', numpoints=1, shadow=False) plt.title("Raw data (2 classes=red and blue)") plt.subplot(1, 2, 2) output_label_array = np.asarray(output_labels) outer_numbers = np.where(output_label_array == outer)[0] inner_numbers = np.where(output_label_array == inner)[0] plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs') plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs') plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'), 'upper left', numpoints=1, shadow=False) plt.title("Labels learned with Label Spreading (KNN)") plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92) plt.show()
bsd-3-clause
jkarnows/scikit-learn
benchmarks/bench_glm.py
297
1493
""" A comparison of different methods in GLM Data comes from a random square matrix. """ from datetime import datetime import numpy as np from sklearn import linear_model from sklearn.utils.bench import total_seconds if __name__ == '__main__': import pylab as pl n_iter = 40 time_ridge = np.empty(n_iter) time_ols = np.empty(n_iter) time_lasso = np.empty(n_iter) dimensions = 500 * np.arange(1, n_iter + 1) for i in range(n_iter): print('Iteration %s of %s' % (i, n_iter)) n_samples, n_features = 10 * i + 3, 10 * i + 3 X = np.random.randn(n_samples, n_features) Y = np.random.randn(n_samples) start = datetime.now() ridge = linear_model.Ridge(alpha=1.) ridge.fit(X, Y) time_ridge[i] = total_seconds(datetime.now() - start) start = datetime.now() ols = linear_model.LinearRegression() ols.fit(X, Y) time_ols[i] = total_seconds(datetime.now() - start) start = datetime.now() lasso = linear_model.LassoLars() lasso.fit(X, Y) time_lasso[i] = total_seconds(datetime.now() - start) pl.figure('scikit-learn GLM benchmark results') pl.xlabel('Dimensions') pl.ylabel('Time (s)') pl.plot(dimensions, time_ridge, color='r') pl.plot(dimensions, time_ols, color='g') pl.plot(dimensions, time_lasso, color='b') pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left') pl.axis('tight') pl.show()
bsd-3-clause
russel1237/scikit-learn
examples/svm/plot_weighted_samples.py
188
1943
""" ===================== SVM: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. The sample weighting rescales the C parameter, which means that the classifier puts more emphasis on getting these points right. The effect might often be subtle. To emphasize the effect here, we particularly weight outliers, making the deformation of the decision boundary very visible. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm def plot_decision_function(classifier, sample_weight, axis, title): # plot the decision function xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) # plot the line, the points, and the nearest vectors to the plane axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone) axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9, cmap=plt.cm.bone) axis.axis('off') axis.set_title(title) # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] Y = [1] * 10 + [-1] * 10 sample_weight_last_ten = abs(np.random.randn(len(X))) sample_weight_constant = np.ones(len(X)) # and bigger weights to some outliers sample_weight_last_ten[15:] *= 5 sample_weight_last_ten[9] *= 15 # for reference, first fit without class weights # fit the model clf_weights = svm.SVC() clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten) clf_no_weights = svm.SVC() clf_no_weights.fit(X, Y) fig, axes = plt.subplots(1, 2, figsize=(14, 6)) plot_decision_function(clf_no_weights, sample_weight_constant, axes[0], "Constant weights") plot_decision_function(clf_weights, sample_weight_last_ten, axes[1], "Modified weights") plt.show()
bsd-3-clause
LiaoPan/scikit-learn
examples/exercises/plot_iris_exercise.py
323
1602
""" ================================ SVM Exercise ================================ A tutorial exercise for using different SVM kernels. This exercise is used in the :ref:`using_kernels_tut` part of the :ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets, svm iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 0, :2] y = y[y != 0] n_sample = len(X) np.random.seed(0) order = np.random.permutation(n_sample) X = X[order] y = y[order].astype(np.float) X_train = X[:.9 * n_sample] y_train = y[:.9 * n_sample] X_test = X[.9 * n_sample:] y_test = y[.9 * n_sample:] # fit the model for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')): clf = svm.SVC(kernel=kernel, gamma=10) clf.fit(X_train, y_train) plt.figure(fig_num) plt.clf() plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired) # Circle out the test data plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10) plt.axis('tight') x_min = X[:, 0].min() x_max = X[:, 0].max() y_min = X[:, 1].min() y_max = X[:, 1].max() XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j] Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()]) # Put the result into a color plot Z = Z.reshape(XX.shape) plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired) plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5]) plt.title(kernel) plt.show()
bsd-3-clause
michigraber/scikit-learn
examples/linear_model/plot_bayesian_ridge.py
248
2588
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create Gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate") plt.plot(w, 'g-', label="Ground truth") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc="best", prop=dict(size=12)) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc="lower left") plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
xzh86/scikit-learn
sklearn/manifold/tests/test_mds.py
324
1862
import numpy as np from numpy.testing import assert_array_almost_equal from nose.tools import assert_raises from sklearn.manifold import mds def test_smacof(): # test metric smacof using the data of "Modern Multidimensional Scaling", # Borg & Groenen, p 154 sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) Z = np.array([[-.266, -.539], [.451, .252], [.016, -.238], [-.200, .524]]) X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1) X_true = np.array([[-1.415, -2.471], [1.633, 1.107], [.249, -.067], [-.468, 1.431]]) assert_array_almost_equal(X, X_true, decimal=3) def test_smacof_error(): # Not symmetric similarity matrix: sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) assert_raises(ValueError, mds.smacof, sim) # Not squared similarity matrix: sim = np.array([[0, 5, 9, 4], [5, 0, 2, 2], [4, 2, 1, 0]]) assert_raises(ValueError, mds.smacof, sim) # init not None and not correct format: sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) Z = np.array([[-.266, -.539], [.016, -.238], [-.200, .524]]) assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1) def test_MDS(): sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]]) mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed") mds_clf.fit(sim)
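# A minimal sketch of the API exercised above: embedding a small random
# dataset (euclidean dissimilarities, arbitrary shape) into two dimensions.
if __name__ == "__main__":
    X = np.random.RandomState(42).rand(6, 4)
    embedding = mds.MDS(n_components=2, random_state=0).fit_transform(X)
    print(embedding.shape)              # (6, 2)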
bsd-3-clause
trungnt13/scikit-learn
sklearn/decomposition/tests/test_dict_learning.py
47
8095
import numpy as np from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.decomposition import DictionaryLearning from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.decomposition import SparseCoder from sklearn.decomposition import dict_learning_online from sklearn.decomposition import sparse_encode rng_global = np.random.RandomState(0) n_samples, n_features = 10, 8 X = rng_global.randn(n_samples, n_features) def test_dict_learning_shapes(): n_components = 5 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_overcomplete(): n_components = 12 dico = DictionaryLearning(n_components, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_reconstruction(): n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) # used to test lars here too, but there's no guarantee the number of # nonzero atoms is right. def test_dict_learning_reconstruction_parallel(): # regression test that parallel reconstruction works with n_jobs=-1 n_components = 12 dico = DictionaryLearning(n_components, transform_algorithm='omp', transform_alpha=0.001, random_state=0, n_jobs=-1) code = dico.fit(X).transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X) dico.set_params(transform_algorithm='lasso_lars') code = dico.transform(X) assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2) def test_dict_learning_nonzero_coefs(): n_components = 4 dico = DictionaryLearning(n_components, transform_algorithm='lars', transform_n_nonzero_coefs=3, random_state=0) code = dico.fit(X).transform(X[1]) assert_true(len(np.flatnonzero(code)) == 3) dico.set_params(transform_algorithm='omp') code = dico.transform(X[1]) assert_equal(len(np.flatnonzero(code)), 3) def test_dict_learning_unknown_fit_algorithm(): n_components = 5 dico = DictionaryLearning(n_components, fit_algorithm='<unknown>') assert_raises(ValueError, dico.fit, X) def test_dict_learning_split(): n_components = 5 dico = DictionaryLearning(n_components, transform_algorithm='threshold', random_state=0) code = dico.fit(X).transform(X) dico.split_sign = True split_code = dico.transform(X) assert_array_equal(split_code[:, :n_components] - split_code[:, n_components:], code) def test_dict_learning_online_shapes(): rng = np.random.RandomState(0) n_components = 8 code, dictionary = dict_learning_online(X, n_components=n_components, alpha=1, random_state=rng) assert_equal(code.shape, (n_samples, n_components)) assert_equal(dictionary.shape, (n_components, n_features)) assert_equal(np.dot(code, dictionary).shape, X.shape) def test_dict_learning_online_verbosity(): n_components = 5 # test verbosity from sklearn.externals.six.moves import cStringIO as StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() dico = MiniBatchDictionaryLearning(n_components, 
n_iter=20, verbose=1, random_state=0) dico.fit(X) dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2, random_state=0) dico.fit(X) dict_learning_online(X, n_components=n_components, alpha=1, verbose=1, random_state=0) dict_learning_online(X, n_components=n_components, alpha=1, verbose=2, random_state=0) finally: sys.stdout = old_stdout assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_estimator_shapes(): n_components = 5 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0) dico.fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_overcomplete(): n_components = 12 dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0).fit(X) assert_true(dico.components_.shape == (n_components, n_features)) def test_dict_learning_online_initialization(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) dico = MiniBatchDictionaryLearning(n_components, n_iter=0, dict_init=V, random_state=0).fit(X) assert_array_equal(dico.components_, V) def test_dict_learning_online_partial_fit(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X), batch_size=1, alpha=1, shuffle=False, dict_init=V, random_state=0).fit(X) dict2 = MiniBatchDictionaryLearning(n_components, alpha=1, n_iter=1, dict_init=V, random_state=0) for i in range(10): for sample in X: dict2.partial_fit(sample) assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) == 0)) assert_array_almost_equal(dict1.components_, dict2.components_, decimal=2) def test_sparse_encode_shapes(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'): code = sparse_encode(X, V, algorithm=algo) assert_equal(code.shape, (n_samples, n_components)) def test_sparse_encode_error(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = sparse_encode(X, V, alpha=0.001) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) def test_sparse_encode_error_default_sparsity(): rng = np.random.RandomState(0) X = rng.randn(100, 64) D = rng.randn(2, 64) code = ignore_warnings(sparse_encode)(X, D, algorithm='omp', n_nonzero_coefs=None) assert_equal(code.shape, (100, 2)) def test_unknown_method(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>") def test_sparse_coder_estimator(): n_components = 12 rng = np.random.RandomState(0) V = rng.randn(n_components, n_features) # random init V /= np.sum(V ** 2, axis=1)[:, np.newaxis] code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars', transform_alpha=0.001).transform(X) assert_true(not np.all(code == 0)) assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
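# A minimal sketch of the SparseCoder usage the tests above exercise,
# following this file's own dictionary-normalization convention; the
# dictionary size is arbitrary.
if __name__ == "__main__":
    D = rng_global.randn(6, n_features)
    D /= np.sum(D ** 2, axis=1)[:, np.newaxis]   # normalize the atoms
    coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
                        transform_alpha=0.001)
    print(coder.transform(X).shape)              # (n_samples, 6) codes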
bsd-3-clause
yilei0620/3D_Conditional_Gan
lib/data_utils.py
1
1596
import numpy as np from sklearn import utils as skutils from rng import np_rng, py_rng def center_crop(x, ph, pw=None): if pw is None: pw = ph h, w = x.shape[:2] j = int(round((h - ph)/2.)) i = int(round((w - pw)/2.)) return x[j:j+ph, i:i+pw] def patch(x, ph, pw=None): if pw is None: pw = ph h, w = x.shape[:2] j = py_rng.randint(0, h-ph) i = py_rng.randint(0, w-pw) x = x[j:j+ph, i:i+pw] return x def list_shuffle(*data): idxs = np_rng.permutation(np.arange(len(data[0]))) if len(data) == 1: return [data[0][idx] for idx in idxs] else: return [[d[idx] for idx in idxs] for d in data] def shuffle(*arrays, **options): if isinstance(arrays[0][0], basestring): return list_shuffle(*arrays) else: return skutils.shuffle(*arrays, random_state=np_rng) def OneHot(X, n=None, negative_class=0.): X = np.asarray(X).flatten() if n is None: n = np.max(X) + 1 Xoh = np.ones((len(X), n)) * negative_class Xoh[np.arange(len(X)), X] = 1. return Xoh def iter_data(*data, **kwargs): size = kwargs.get('size', 128) n = kwargs.get('ndata',0) sIndex = kwargs.get('shuffle_index',[]) batches = n / size if n % size != 0: batches += 1 for b in range(batches): start = b * size end = (b + 1) * size if end > n: end = n if len(data) == 1: yield data[0][start:end] else: # print sIndex[start:end] yield tuple([d[sIndex[start:end]] for d in data])
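# A minimal usage sketch for the helpers above (note the module targets
# Python 2: see `basestring` and the integer division in iter_data):
if __name__ == '__main__':
    labels = [0, 2, 1, 2]
    print(OneHot(labels, n=3))               # (4, 3) one-hot matrix

    img = np.arange(100).reshape(10, 10)
    print(center_crop(img, 4).shape)         # (4, 4) central crop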
mit
mayblue9/scikit-learn
benchmarks/bench_multilabel_metrics.py
276
7138
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': partial(f1_score, average='micro'), 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. """ metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in enumerate(formats): ax.plot(x_ticks, results[i, 
j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: {}'.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
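# Example invocations (flags as defined by the argparse parser above):
#
#   python bench_multilabel_metrics.py f1 accuracy --samples 5000
#   python bench_multilabel_metrics.py --plot classes --classes 16
#
# The first tabulates timings for the chosen metrics across all formats;
# the second additionally plots time against an increasing class count.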
bsd-3-clause
Zing22/uemscode
tmp_test.py
1
1634
# -*- coding=utf-8 -*- #### for testing steps import numpy as np import matplotlib.pyplot as plt from sklearn import svm from sklearn.externals import joblib from PIL import Image from process import toBin, cropLetters from img2feature import toFeature from main import readAllFiles TEMP_DIR = 'tmp/' def test_onePic(): path = input('Pic path:') img = Image.open(path) bimg = toBin(img) bimg.save(TEMP_DIR+ 'bimg.jpg') success, letters = cropLetters(bimg) if not success: print('Crop failed.') print(letters) return features = [] for l in letters: features.append([int(x) for x in toFeature(l).split(' ')]) l.save(TEMP_DIR + '%d.jpg' % len(features)) pre = clf.predict(features) code = ''.join([chr(x + ord('A')) for x in pre]) print(code) def test_tmp_dir(): filenames = readAllFiles(TEMP_DIR) for file in filenames: img = Image.open(TEMP_DIR + file) bimg = toBin(img) bimg.save(TEMP_DIR + 'tmp_' + file) success, letters = cropLetters(bimg) if not success: print('Crop failed.') print(letters) return features = [] for l in letters: features.append([int(x) for x in toFeature(l).split(' ')]) # l.save(TEMP_DIR + '%d.jpg' % len(features)) pre = clf.predict(features) code = ''.join([chr(x + ord('A')) for x in pre]) print(code) SAVE_TO = 'model.pkl' def main(): global clf clf = joblib.load(SAVE_TO) test_onePic() # test_tmp_dir() if __name__ == '__main__': main()
mit
RPGOne/Skynet
scikit-learn-0.18.1/sklearn/linear_model/tests/test_ransac.py
52
17482
from scipy import sparse import numpy as np from scipy import sparse from numpy.testing import assert_equal, assert_raises from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_equal from sklearn.utils import check_random_state from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_almost_equal from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso from sklearn.linear_model.ransac import _dynamic_max_trials # Generate coordinates of line X = np.arange(-200, 200) y = 0.2 * X + 20 data = np.column_stack([X, y]) # Add some faulty data outliers = np.array((10, 30, 200)) data[outliers[0], :] = (1000, 1000) data[outliers[1], :] = (-1000, -1000) data[outliers[2], :] = (-100, -50) X = data[:, 0][:, np.newaxis] y = data[:, 1] def test_ransac_inliers_outliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_is_data_valid(): def is_data_valid(X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False X = np.random.rand(10, 2) y = np.random.rand(10, 1) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_data_valid=is_data_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_is_model_valid(): def is_model_valid(estimator, X, y): assert_equal(X.shape[0], 2) assert_equal(y.shape[0], 2) return False base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, is_model_valid=is_model_valid, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_max_trials(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=0, random_state=0) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, max_trials=11, random_state=0) assert getattr(ransac_estimator, 'n_trials_', None) is None ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 2) def test_ransac_stop_n_inliers(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_n_inliers=2, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_stop_score(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, stop_score=0, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.n_trials_, 1) def test_ransac_score(): X = np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.score(X[2:], y[2:]), 1) assert_less(ransac_estimator.score(X[:2], y[:2]), 1) def test_ransac_predict(): X = 
np.arange(100)[:, None] y = np.zeros((100, )) y[0] = 1 y[1] = 100 base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.5, random_state=0) ransac_estimator.fit(X, y) assert_equal(ransac_estimator.predict(X), np.zeros(100)) def test_ransac_resid_thresh_no_inliers(): # When residual_threshold=0.0 there are no inliers and a # ValueError with a message should be raised base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=0.0, random_state=0) assert_raises_regexp(ValueError, "No inliers.*residual_threshold.*0\.0", ransac_estimator.fit, X, y) def test_ransac_sparse_coo(): X_sparse = sparse.coo_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csr(): X_sparse = sparse.csr_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_sparse_csc(): X_sparse = sparse.csc_matrix(X) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator.fit(X_sparse, y) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_none_estimator(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0) ransac_estimator.fit(X, y) ransac_none_estimator.fit(X, y) assert_array_almost_equal(ransac_estimator.predict(X), ransac_none_estimator.predict(X)) def test_ransac_min_n_samples(): base_estimator = LinearRegression() ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2. 
/ X.shape[0], residual_threshold=5, random_state=0) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1, residual_threshold=5, random_state=0) ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2, residual_threshold=5, random_state=0) ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0, residual_threshold=5, random_state=0) ransac_estimator6 = RANSACRegressor(base_estimator, residual_threshold=5, random_state=0) ransac_estimator7 = RANSACRegressor(base_estimator, min_samples=X.shape[0] + 1, residual_threshold=5, random_state=0) ransac_estimator1.fit(X, y) ransac_estimator2.fit(X, y) ransac_estimator5.fit(X, y) ransac_estimator6.fit(X, y) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator2.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator5.predict(X)) assert_array_almost_equal(ransac_estimator1.predict(X), ransac_estimator6.predict(X)) assert_raises(ValueError, ransac_estimator3.fit, X, y) assert_raises(ValueError, ransac_estimator4.fit, X, y) assert_raises(ValueError, ransac_estimator7.fit, X, y) def test_ransac_multi_dimensional_targets(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) # 3-D target values yyy = np.column_stack([y, y, y]) # Estimate parameters of corrupted data ransac_estimator.fit(X, yyy) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # XXX: Remove in 0.20 def test_ransac_residual_metric(): residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1) residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, residual_metric=residual_metric2) # multi-dimensional ransac_estimator0.fit(X, yyy) assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy) assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_residual_loss(): loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1) loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1) loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred) yyy = np.column_stack([y, y, y]) base_estimator = LinearRegression() ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0) ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi1) ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss=loss_multi2) # multi-dimensional ransac_estimator0.fit(X, yyy) ransac_estimator1.fit(X, yyy) 
ransac_estimator2.fit(X, yyy) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator1.predict(X)) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) # one-dimensional ransac_estimator0.fit(X, y) ransac_estimator2.loss = loss_mono ransac_estimator2.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2, residual_threshold=5, random_state=0, loss="squared_loss") ransac_estimator3.fit(X, y) assert_array_almost_equal(ransac_estimator0.predict(X), ransac_estimator2.predict(X)) def test_ransac_default_residual_threshold(): base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, random_state=0) # Estimate parameters of corrupted data ransac_estimator.fit(X, y) # Ground truth / reference inlier mask ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) def test_ransac_dynamic_max_trials(): # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in # Hartley, R.~I. and Zisserman, A., 2004, # Multiple View Geometry in Computer Vision, Second Edition, # Cambridge University Press, ISBN: 0521540518 # e = 0%, min_samples = X assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1) # e = 5%, min_samples = 2 assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2) # e = 10%, min_samples = 2 assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3) # e = 30%, min_samples = 2 assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7) # e = 50%, min_samples = 2 assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17) # e = 5%, min_samples = 8 assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5) # e = 10%, min_samples = 8 assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9) # e = 30%, min_samples = 8 assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78) # e = 50%, min_samples = 8 assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177) # e = 0%, min_samples = 10 assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0) assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf')) base_estimator = LinearRegression() ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=-0.1) assert_raises(ValueError, ransac_estimator.fit, X, y) ransac_estimator = RANSACRegressor(base_estimator, min_samples=2, stop_probability=1.1) assert_raises(ValueError, ransac_estimator.fit, X, y) def test_ransac_fit_sample_weight(): ransac_estimator = RANSACRegressor(random_state=0) n_samples = y.shape[0] weights = np.ones(n_samples) ransac_estimator.fit(X, y, weights) # sanity check assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples) ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_ ).astype(np.bool_) ref_inlier_mask[outliers] = False # check that mask is correct assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask) # check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where # X = X1 repeated n1 times, X2 repeated n2 times and so forth random_state = check_random_state(0) X_ = random_state.randint(0, 200, [10, 1]) y_ = np.ndarray.flatten(0.2 * X_ + 2) sample_weight = random_state.randint(0, 10, 10) outlier_X = random_state.randint(0, 1000, [1, 1]) outlier_weight = random_state.randint(0, 10, 1) outlier_y = random_state.randint(-1000, 0, 1) X_flat = np.append(np.repeat(X_, sample_weight, axis=0), np.repeat(outlier_X, outlier_weight, 
axis=0), axis=0) y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0), np.repeat(outlier_y, outlier_weight, axis=0), axis=0)) ransac_estimator.fit(X_flat, y_flat) ref_coef_ = ransac_estimator.estimator_.coef_ sample_weight = np.append(sample_weight, outlier_weight) X_ = np.append(X_, outlier_X, axis=0) y_ = np.append(y_, outlier_y) ransac_estimator.fit(X_, y_, sample_weight) assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_) # check that if base_estimator.fit doesn't support # sample_weight, raises error base_estimator = Lasso() ransac_estimator = RANSACRegressor(base_estimator) assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
bsd-3-clause
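The hand-checked trial counts in test_ransac_dynamic_max_trials above all follow from the standard RANSAC stopping bound N >= log(1 - p) / log(1 - w^m), where w is the inlier ratio and m = min_samples. Below is a minimal sketch of that bound; the name max_trials_needed is illustrative (not sklearn API), and the edge-case handling is inferred from the values the test asserts:

import numpy as np


def max_trials_needed(n_inliers, n_samples, min_samples, probability):
    # P(at least one all-inlier draw in N trials) >= probability
    # => N >= log(1 - probability) / log(1 - w ** min_samples)
    if probability == 0:
        return 0              # any number of trials trivially satisfies p = 0
    if probability == 1:
        return float('inf')   # certainty can never be guaranteed
    w = n_inliers / float(n_samples)
    denom = 1 - w ** min_samples
    if denom == 0:
        return 1              # every draw is all-inlier; one trial suffices
    return int(np.ceil(np.log(1 - probability) / np.log(denom)))


print(max_trials_needed(50, 100, 8, 0.99))  # 1177, matching Table 4.3 above

The same expression reproduces every asserted value in the test, e.g. 17 for (50, 100, 2, 0.99) and 78 for (70, 100, 8, 0.99).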
jjx02230808/project0223
benchmarks/bench_lasso.py
297
3305
""" Benchmarks of Lasso vs LassoLars First, we fix a training set and increase the number of samples. Then we plot the computation time as function of the number of samples. In the second benchmark, we increase the number of dimensions of the training set. Then we plot the computation time as function of the number of dimensions. In both cases, only 10% of the features are informative. """ import gc from time import time import numpy as np from sklearn.datasets.samples_generator import make_regression def compute_bench(alpha, n_samples, n_features, precompute): lasso_results = [] lars_lasso_results = [] it = 0 for ns in n_samples: for nf in n_features: it += 1 print('==================') print('Iteration %s of %s' % (it, max(len(n_samples), len(n_features)))) print('==================') n_informative = nf // 10 X, Y, coef_ = make_regression(n_samples=ns, n_features=nf, n_informative=n_informative, noise=0.1, coef=True) X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data gc.collect() print("- benchmarking Lasso") clf = Lasso(alpha=alpha, fit_intercept=False, precompute=precompute) tstart = time() clf.fit(X, Y) lasso_results.append(time() - tstart) gc.collect() print("- benchmarking LassoLars") clf = LassoLars(alpha=alpha, fit_intercept=False, normalize=False, precompute=precompute) tstart = time() clf.fit(X, Y) lars_lasso_results.append(time() - tstart) return lasso_results, lars_lasso_results if __name__ == '__main__': from sklearn.linear_model import Lasso, LassoLars import pylab as pl alpha = 0.01 # regularization parameter n_features = 10 list_n_samples = np.linspace(100, 1000000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples, [n_features], precompute=True) pl.figure('scikit-learn LASSO benchmark results') pl.subplot(211) pl.plot(list_n_samples, lasso_results, 'b-', label='Lasso') pl.plot(list_n_samples, lars_lasso_results, 'r-', label='LassoLars') pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha)) pl.legend(loc='upper left') pl.xlabel('number of samples') pl.ylabel('Time (s)') pl.axis('tight') n_samples = 2000 list_n_features = np.linspace(500, 3000, 5).astype(np.int) lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples], list_n_features, precompute=False) pl.subplot(212) pl.plot(list_n_features, lasso_results, 'b-', label='Lasso') pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars') pl.title('%d samples, alpha=%s' % (n_samples, alpha)) pl.legend(loc='upper left') pl.xlabel('number of features') pl.ylabel('Time (s)') pl.axis('tight') pl.show()
bsd-3-clause
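The timing idiom in compute_bench above (force a garbage collection, then bracket fit() with time()) generalizes to any estimator. A minimal sketch of the same pattern; best_fit_time and n_runs are illustrative names, and taking the best of several runs is an added assumption to damp scheduler noise:

import gc
from time import time

from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso


def best_fit_time(clf, X, y, n_runs=3):
    # Collect garbage before each run so the timed region measures
    # fit() itself rather than allocator churn, as the benchmark does.
    times = []
    for _ in range(n_runs):
        gc.collect()
        t0 = time()
        clf.fit(X, y)
        times.append(time() - t0)
    return min(times)


X, y = make_regression(n_samples=2000, n_features=200, noise=0.1)
print(best_fit_time(Lasso(alpha=0.01), X, y))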
ChanderG/scikit-learn
sklearn/metrics/tests/test_regression.py
272
6066
from __future__ import division, print_function

import numpy as np
from itertools import product

from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal

from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score

from sklearn.metrics.regression import _check_reg_targets


def test_regression_metrics(n_samples=50):
    y_true = np.arange(n_samples)
    y_pred = y_true + 1

    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)


def test_multioutput_regression():
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

    error = mean_squared_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    error = mean_absolute_error(y_true, y_pred)
    assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)

    error = r2_score(y_true, y_pred, multioutput='variance_weighted')
    assert_almost_equal(error, 1. - 5. / 2)
    error = r2_score(y_true, y_pred, multioutput='uniform_average')
    assert_almost_equal(error, -.875)


def test_regression_metrics_at_limits():
    assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
    assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)


def test__check_reg_targets():
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]

    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)


def test_regression_multioutput_array():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')

    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)

    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]] * 4
    y_pred = [[1, 1]] * 4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)

    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                 multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)

    # Checking for the condition in which both numerator and denominator
    # are zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                 multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))


def test_regression_custom_weights():
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]

    msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
    maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
    rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
    evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])

    assert_almost_equal(msew, 0.39, decimal=2)
    assert_almost_equal(maew, 0.475, decimal=3)
    assert_almost_equal(rw, 0.94, decimal=2)
    assert_almost_equal(evsw, 0.94, decimal=2)
bsd-3-clause
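The hand-computed constant in test_multioutput_regression is easy to re-derive: 5 of the 12 entries disagree and every per-entry error is 0 or 1, so both MSE and MAE reduce to the mismatch rate 5/12. A quick check, reusing the arrays from the test above:

import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])

# Errors are all 0 or 1, so squaring changes nothing and both
# metrics equal the fraction of mismatched entries:
print((y_true != y_pred).mean())              # 0.41666... == 5 / 12
print(mean_squared_error(y_true, y_pred))     # same value
print(mean_absolute_error(y_true, y_pred))    # same value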
JPFrancoia/scikit-learn
sklearn/preprocessing/data.py
13
70436
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Mathieu Blondel <mathieu@mblondel.org> # Olivier Grisel <olivier.grisel@ensta.org> # Andreas Mueller <amueller@ais.uni-bonn.de> # Eric Martin <eric@ericmart.in> # Giorgio Patrini <giorgio.patrini@anu.edu.au> # License: BSD 3 clause from itertools import chain, combinations import numbers import warnings import numpy as np from scipy import sparse from ..base import BaseEstimator, TransformerMixin from ..externals import six from ..utils import check_array from ..utils import deprecated from ..utils.extmath import row_norms from ..utils.extmath import _incremental_mean_and_var from ..utils.fixes import combinations_with_replacement as combinations_w_r from ..utils.fixes import bincount from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1, inplace_csr_row_normalize_l2) from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis, incr_mean_variance_axis, min_max_axis) from ..utils.validation import check_is_fitted, FLOAT_DTYPES zip = six.moves.zip map = six.moves.map range = six.moves.range __all__ = [ 'Binarizer', 'KernelCenterer', 'MinMaxScaler', 'MaxAbsScaler', 'Normalizer', 'OneHotEncoder', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', ] DEPRECATION_MSG_1D = ( "Passing 1d arrays as data is deprecated in 0.17 and will " "raise ValueError in 0.19. Reshape your data either using " "X.reshape(-1, 1) if your data has a single feature or " "X.reshape(1, -1) if it contains a single sample." ) def _handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features.''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale def scale(X, axis=0, with_mean=True, with_std=True, copy=True): """Standardize a dataset along any axis Center to the mean and component wise scale to unit variance. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : {array-like, sparse matrix} The data to center and scale. axis : int (0 by default) axis used to compute the means and standard deviations along. If 0, independently standardize each feature, otherwise (if 1) standardize each sample. with_mean : boolean, True by default If True, center the data before scaling. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSC matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_mean=False` (in that case, only variance scaling will be performed on the features of the CSC matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSC matrix. See also -------- StandardScaler: Performs scaling to unit variance using the``Transformer`` API (e.g. 
as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator='the scale function', dtype=FLOAT_DTYPES) if sparse.issparse(X): if with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` instead" " See docstring for motivation and alternatives.") if axis != 0: raise ValueError("Can only scale sparse matrix on axis=0, " " got axis=%d" % axis) if with_std: _, var = mean_variance_axis(X, axis=0) var = _handle_zeros_in_scale(var, copy=False) inplace_column_scale(X, 1 / np.sqrt(var)) else: X = np.asarray(X) if with_mean: mean_ = np.mean(X, axis) if with_std: scale_ = np.std(X, axis) # Xr is a view on the original array that enables easy use of # broadcasting on the axis in which we are interested in Xr = np.rollaxis(X, axis) if with_mean: Xr -= mean_ mean_1 = Xr.mean(axis=0) # Verify that mean_1 is 'close to zero'. If X contains very # large values, mean_1 can also be very large, due to a lack of # precision of mean_. In this case, a pre-scaling of the # concerned feature is efficient, for instance by its mean or # maximum. if not np.allclose(mean_1, 0): warnings.warn("Numerical issues were encountered " "when centering the data " "and might not be solved. Dataset may " "contain too large values. You may need " "to prescale your features.") Xr -= mean_1 if with_std: scale_ = _handle_zeros_in_scale(scale_, copy=False) Xr /= scale_ if with_mean: mean_2 = Xr.mean(axis=0) # If mean_2 is not 'close to zero', it comes from the fact that # scale_ is very small so that mean_2 = mean_1/scale_ > 0, even # if mean_1 was close to zero. The problem is thus essentially # due to the lack of precision of mean_. A solution is then to # subtract the mean again: if not np.allclose(mean_2, 0): warnings.warn("Numerical issues were encountered " "when scaling the data " "and might not be solved. The standard " "deviation of the data is probably " "very close to 0. ") Xr -= mean_2 return X class MinMaxScaler(BaseEstimator, TransformerMixin): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. copy : boolean, optional, default True Set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array). Attributes ---------- min_ : ndarray, shape (n_features,) Per feature adjustment for minimum. scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. data_min_ : ndarray, shape (n_features,) Per feature minimum seen in the data .. versionadded:: 0.17 *data_min_* instead of deprecated *data_min*. data_max_ : ndarray, shape (n_features,) Per feature maximum seen in the data .. versionadded:: 0.17 *data_max_* instead of deprecated *data_max*. data_range_ : ndarray, shape (n_features,) Per feature range ``(data_max_ - data_min_)`` seen in the data .. versionadded:: 0.17 *data_range_* instead of deprecated *data_range*. 
See also -------- minmax_scale: Equivalent function without the object oriented API. """ def __init__(self, feature_range=(0, 1), copy=True): self.feature_range = feature_range self.copy = copy @property @deprecated("Attribute data_range will be removed in " "0.19. Use ``data_range_`` instead") def data_range(self): return self.data_range_ @property @deprecated("Attribute data_min will be removed in " "0.19. Use ``data_min_`` instead") def data_min(self): return self.data_min_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.min_ del self.n_samples_seen_ del self.data_min_ del self.data_max_ del self.data_range_ def fit(self, X, y=None): """Compute the minimum and maximum to be used for later scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of min and max on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y : Passthrough for ``Pipeline`` compatibility. """ feature_range = self.feature_range if feature_range[0] >= feature_range[1]: raise ValueError("Minimum of desired feature range must be smaller" " than maximum. Got %s." % str(feature_range)) if sparse.issparse(X): raise TypeError("MinMaxScaler does no support sparse input. " "You may consider to use MaxAbsScaler instead.") X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) data_min = np.min(X, axis=0) data_max = np.max(X, axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next steps else: data_min = np.minimum(self.data_min_, data_min) data_max = np.maximum(self.data_max_, data_max) self.n_samples_seen_ += X.shape[0] data_range = data_max - data_min self.scale_ = ((feature_range[1] - feature_range[0]) / _handle_zeros_in_scale(data_range)) self.min_ = feature_range[0] - data_min * self.scale_ self.data_min_ = data_min self.data_max_ = data_max self.data_range_ = data_range return self def transform(self, X): """Scaling features of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. """ check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X *= self.scale_ X += self.min_ return X def inverse_transform(self, X): """Undo the scaling of X according to feature_range. Parameters ---------- X : array-like, shape [n_samples, n_features] Input data that will be transformed. It cannot be sparse. 
""" check_is_fitted(self, 'scale_') X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) X -= self.min_ X /= self.scale_ return X def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True): """Transforms features by scaling each feature to a given range. This estimator scales and translates each feature individually such that it is in the given range on the training set, i.e. between zero and one. The transformation is given by:: X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min where min, max = feature_range. This transformation is often used as an alternative to zero mean, unit variance scaling. Read more in the :ref:`User Guide <preprocessing_scaler>`. .. versionadded:: 0.17 *minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`. Parameters ---------- feature_range : tuple (min, max), default=(0, 1) Desired range of transformed data. axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MinMaxScaler: Performs scaling to a given range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MinMaxScaler(feature_range=feature_range, copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class StandardScaler(BaseEstimator, TransformerMixin): """Standardize features by removing the mean and scaling to unit variance Centering and scaling happen independently on each feature by computing the relevant statistics on the samples in the training set. Mean and standard deviation are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators: they might behave badly if the individual feature do not more or less look like standard normally distributed data (e.g. Gaussian with 0 mean and unit variance). For instance many elements used in the objective function of a learning algorithm (such as the RBF kernel of Support Vector Machines or the L1 and L2 regularizers of linear models) assume that all features are centered around 0 and have variance in the same order. If a feature has a variance that is orders of magnitude larger that others, it might dominate the objective function and make the estimator unable to learn from other features correctly as expected. This scaler can also be applied to sparse CSR or CSC matrices by passing `with_mean=False` to avoid breaking the sparsity structure of the data. Read more in the :ref:`User Guide <preprocessing_scaler>`. 
Parameters ---------- with_mean : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_std : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). copy : boolean, optional, default True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* is recommended instead of deprecated *std_*. mean_ : array of floats with shape [n_features] The mean value for each feature in the training set. var_ : array of floats with shape [n_features] The variance for each feature in the training set. Used to compute `scale_` n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- scale: Equivalent function without the object oriented API. :class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. """ # noqa def __init__(self, copy=True, with_mean=True, with_std=True): self.with_mean = with_mean self.with_std = with_std self.copy = copy @property @deprecated("Attribute ``std_`` will be removed in 0.19. " "Use ``scale_`` instead") def std_(self): return self.scale_ def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.mean_ del self.var_ def fit(self, X, y=None): """Compute the mean and std to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of mean and std on X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. The algorithm for incremental mean and std is given in Equation 1.5a,b in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms for computing the sample variance: Analysis and recommendations." The American Statistician 37.3 (1983): 242-247: Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. 
""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) # Even in the case of `with_mean=False`, we update the mean anyway # This is needed for the incremental computation of the var # See incr_mean_variance_axis and _incremental_mean_variance_axis if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.with_std: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_, self.var_ = mean_variance_axis(X, axis=0) self.n_samples_seen_ = X.shape[0] # Next passes else: self.mean_, self.var_, self.n_samples_seen_ = \ incr_mean_variance_axis(X, axis=0, last_mean=self.mean_, last_var=self.var_, last_n=self.n_samples_seen_) else: self.mean_ = None self.var_ = None else: # First pass if not hasattr(self, 'n_samples_seen_'): self.mean_ = .0 self.n_samples_seen_ = 0 if self.with_std: self.var_ = .0 else: self.var_ = None self.mean_, self.var_, self.n_samples_seen_ = \ _incremental_mean_and_var(X, self.mean_, self.var_, self.n_samples_seen_) if self.with_std: self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_)) else: self.scale_ = None return self def transform(self, X, y=None, copy=None): """Perform standardization by centering and scaling Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False, warn_on_dtype=True, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot center sparse matrices: pass `with_mean=False` " "instead. See docstring for motivation and alternatives.") if self.scale_ is not None: inplace_column_scale(X, 1 / self.scale_) else: if self.with_mean: X -= self.mean_ if self.with_std: X /= self.scale_ return X def inverse_transform(self, X, copy=None): """Scale back the data to the original representation Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to scale along the features axis. """ check_is_fitted(self, 'scale_') copy = copy if copy is not None else self.copy if sparse.issparse(X): if self.with_mean: raise ValueError( "Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.") if not sparse.isspmatrix_csr(X): X = X.tocsr() copy = False if copy: X = X.copy() if self.scale_ is not None: inplace_column_scale(X, self.scale_) else: X = np.asarray(X) if copy: X = X.copy() if self.with_std: X *= self.scale_ if self.with_mean: X += self.mean_ return X class MaxAbsScaler(BaseEstimator, TransformerMixin): """Scale each feature by its maximum absolute value. This estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity. This scaler can also be applied to sparse CSR or CSC matrices. .. versionadded:: 0.17 Parameters ---------- copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). 
Attributes ---------- scale_ : ndarray, shape (n_features,) Per feature relative scaling of the data. .. versionadded:: 0.17 *scale_* attribute. max_abs_ : ndarray, shape (n_features,) Per feature maximum absolute value. n_samples_seen_ : int The number of samples processed by the estimator. Will be reset on new calls to fit, but increments across ``partial_fit`` calls. See also -------- maxabs_scale: Equivalent function without the object oriented API. """ def __init__(self, copy=True): self.copy = copy def _reset(self): """Reset internal data-dependent state of the scaler, if necessary. __init__ parameters are not touched. """ # Checking one attribute is enough, becase they are all set together # in partial_fit if hasattr(self, 'scale_'): del self.scale_ del self.n_samples_seen_ del self.max_abs_ def fit(self, X, y=None): """Compute the maximum absolute value to be used for later scaling. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the per-feature minimum and maximum used for later scaling along the features axis. """ # Reset internal state before fitting self._reset() return self.partial_fit(X, y) def partial_fit(self, X, y=None): """Online computation of max absolute value of X for later scaling. All of X is processed as a single batch. This is intended for cases when `fit` is not feasible due to very large number of `n_samples` or because X is read from a continuous stream. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data used to compute the mean and standard deviation used for later scaling along the features axis. y: Passthrough for ``Pipeline`` compatibility. """ X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): mins, maxs = min_max_axis(X, axis=0) max_abs = np.maximum(np.abs(mins), np.abs(maxs)) else: max_abs = np.abs(X).max(axis=0) # First pass if not hasattr(self, 'n_samples_seen_'): self.n_samples_seen_ = X.shape[0] # Next passes else: max_abs = np.maximum(self.max_abs_, max_abs) self.n_samples_seen_ += X.shape[0] self.max_abs_ = max_abs self.scale_ = _handle_zeros_in_scale(max_abs) return self def transform(self, X, y=None): """Scale the data Parameters ---------- X : {array-like, sparse matrix} The data that should be scaled. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, 1.0 / self.scale_) else: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : {array-like, sparse matrix} The data that should be transformed back. """ check_is_fitted(self, 'scale_') X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): inplace_column_scale(X, self.scale_) else: X *= self.scale_ return X def maxabs_scale(X, axis=0, copy=True): """Scale each feature to the [-1, 1] range without breaking the sparsity. This estimator scales each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. 
This scaler can also be applied to sparse CSR or CSC matrices. Parameters ---------- axis : int (0 by default) axis used to scale along. If 0, independently scale each feature, otherwise (if 1) scale each sample. copy : boolean, optional, default is True Set to False to perform inplace scaling and avoid a copy (if the input is already a numpy array). See also -------- MaxAbsScaler: Performs scaling to the [-1, 1] range using the``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ # noqa # To allow retro-compatibility, we handle here the case of 1D-input # From 0.17, 1D-input are deprecated in scaler objects # Although, we want to allow the users to keep calling this function # with 1D-input. # Cast input to array, as we need to check ndim. Prior to 0.17, that was # done inside the scaler object fit_transform. # If copy is required, it will be done inside the scaler object. X = check_array(X, accept_sparse=('csr', 'csc'), copy=False, ensure_2d=False, dtype=FLOAT_DTYPES) original_ndim = X.ndim if original_ndim == 1: X = X.reshape(X.shape[0], 1) s = MaxAbsScaler(copy=copy) if axis == 0: X = s.fit_transform(X) else: X = s.fit_transform(X.T).T if original_ndim == 1: X = X.ravel() return X class RobustScaler(BaseEstimator, TransformerMixin): """Scale features using statistics that are robust to outliers. This Scaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th quantile) and the 3rd quartile (75th quantile). Centering and scaling happen independently on each feature (or each sample, depending on the `axis` argument) by computing the relevant statistics on the samples in the training set. Median and interquartile range are then stored to be used on later data using the `transform` method. Standardization of a dataset is a common requirement for many machine learning estimators. Typically this is done by removing the mean and scaling to unit variance. However, outliers can often influence the sample mean / variance in a negative way. In such cases, the median and the interquartile range often give better results. .. versionadded:: 0.17 Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- with_centering : boolean, True by default If True, center the data before scaling. This does not work (and will raise an exception) when attempted on sparse matrices, because centering them entails building a dense matrix which in common use cases is likely to be too large to fit in memory. with_scaling : boolean, True by default If True, scale the data to interquartile range. quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True If False, try to avoid a copy and do inplace scaling instead. This is not guaranteed to always work inplace; e.g. if the data is not a NumPy array or scipy.sparse CSR matrix, a copy may still be returned. Attributes ---------- center_ : array of floats The median value for each feature in the training set. scale_ : array of floats The (scaled) interquartile range for each feature in the training set. .. versionadded:: 0.17 *scale_* attribute. See also -------- robust_scale: Equivalent function without the object oriented API. 
:class:`sklearn.decomposition.PCA` Further removes the linear correlation across features with 'whiten=True'. Notes ----- See examples/preprocessing/plot_robust_scaling.py for an example. https://en.wikipedia.org/wiki/Median_(statistics) https://en.wikipedia.org/wiki/Interquartile_range """ def __init__(self, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): self.with_centering = with_centering self.with_scaling = with_scaling self.quantile_range = quantile_range self.copy = copy def _check_array(self, X, copy): """Makes sure centering is not enabled for sparse matrices.""" X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy, ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_centering: raise ValueError( "Cannot center sparse matrices: use `with_centering=False`" " instead. See docstring for motivation and alternatives.") return X def fit(self, X, y=None): """Compute the median and quantiles to be used for scaling. Parameters ---------- X : array-like, shape [n_samples, n_features] The data used to compute the median and quantiles used for later scaling along the features axis. """ if sparse.issparse(X): raise TypeError("RobustScaler cannot be fitted on sparse inputs") X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if self.with_centering: self.center_ = np.median(X, axis=0) if self.with_scaling: q_min, q_max = self.quantile_range if not 0 <= q_min <= q_max <= 100: raise ValueError("Invalid quantile range: %s" % str(self.quantile_range)) q = np.percentile(X, self.quantile_range, axis=0) self.scale_ = (q[1] - q[0]) self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False) return self def transform(self, X, y=None): """Center and scale the data Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, 1.0 / self.scale_) else: if self.with_centering: X -= self.center_ if self.with_scaling: X /= self.scale_ return X def inverse_transform(self, X): """Scale back the data to the original representation Parameters ---------- X : array-like The data used to scale along the specified axis. """ if self.with_centering: check_is_fitted(self, 'center_') if self.with_scaling: check_is_fitted(self, 'scale_') X = self._check_array(X, self.copy) if X.ndim == 1: warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning) if sparse.issparse(X): if self.with_scaling: inplace_column_scale(X, self.scale_) else: if self.with_scaling: X *= self.scale_ if self.with_centering: X += self.center_ return X def robust_scale(X, axis=0, with_centering=True, with_scaling=True, quantile_range=(25.0, 75.0), copy=True): """Standardize a dataset along any axis Center to the median and component wise scale according to the interquartile range. Read more in the :ref:`User Guide <preprocessing_scaler>`. Parameters ---------- X : array-like The data to center and scale. axis : int (0 by default) axis used to compute the medians and IQR along. If 0, independently scale each feature, otherwise (if 1) scale each sample. with_centering : boolean, True by default If True, center the data before scaling. 
with_scaling : boolean, True by default If True, scale the data to unit variance (or equivalently, unit standard deviation). quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0 Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR Quantile range used to calculate ``scale_``. .. versionadded:: 0.18 copy : boolean, optional, default is True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). Notes ----- This implementation will refuse to center scipy.sparse matrices since it would make them non-sparse and would potentially crash the program with memory exhaustion problems. Instead the caller is expected to either set explicitly `with_centering=False` (in that case, only variance scaling will be performed on the features of the CSR matrix) or to call `X.toarray()` if he/she expects the materialized dense array to fit in memory. To avoid memory copy the caller should pass a CSR matrix. See also -------- RobustScaler: Performs centering and scaling using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling, quantile_range=quantile_range, copy=copy) if axis == 0: return s.fit_transform(X) else: return s.fit_transform(X.T).T class PolynomialFeatures(BaseEstimator, TransformerMixin): """Generate polynomial and interaction features. Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2]. Parameters ---------- degree : integer The degree of the polynomial features. Default = 2. interaction_only : boolean, default = False If true, only interaction features are produced: features that are products of at most ``degree`` *distinct* input features (so not ``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.). include_bias : boolean If True (default), then include a bias column, the feature in which all polynomial powers are zero (i.e. a column of ones - acts as an intercept term in a linear model). Examples -------- >>> X = np.arange(6).reshape(3, 2) >>> X array([[0, 1], [2, 3], [4, 5]]) >>> poly = PolynomialFeatures(2) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0., 0., 1.], [ 1., 2., 3., 4., 6., 9.], [ 1., 4., 5., 16., 20., 25.]]) >>> poly = PolynomialFeatures(interaction_only=True) >>> poly.fit_transform(X) array([[ 1., 0., 1., 0.], [ 1., 2., 3., 6.], [ 1., 4., 5., 20.]]) Attributes ---------- powers_ : array, shape (n_output_features, n_input_features) powers_[i, j] is the exponent of the jth input in the ith output. n_input_features_ : int The total number of input features. n_output_features_ : int The total number of polynomial output features. The number of output features is computed by iterating over all suitably sized combinations of input features. Notes ----- Be aware that the number of features in the output array scales polynomially in the number of features of the input array, and exponentially in the degree. High degrees can cause overfitting. 
See :ref:`examples/linear_model/plot_polynomial_interpolation.py <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>` """ def __init__(self, degree=2, interaction_only=False, include_bias=True): self.degree = degree self.interaction_only = interaction_only self.include_bias = include_bias @staticmethod def _combinations(n_features, degree, interaction_only, include_bias): comb = (combinations if interaction_only else combinations_w_r) start = int(not include_bias) return chain.from_iterable(comb(range(n_features), i) for i in range(start, degree + 1)) @property def powers_(self): check_is_fitted(self, 'n_input_features_') combinations = self._combinations(self.n_input_features_, self.degree, self.interaction_only, self.include_bias) return np.vstack(bincount(c, minlength=self.n_input_features_) for c in combinations) def get_feature_names(self, input_features=None): """ Return feature names for output features Parameters ---------- input_features : list of string, length n_features, optional String names for input features if available. By default, "x0", "x1", ... "xn_features" is used. Returns ------- output_feature_names : list of string, length n_output_features """ powers = self.powers_ if input_features is None: input_features = ['x%d' % i for i in range(powers.shape[1])] feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join("%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds])) else: name = "1" feature_names.append(name) return feature_names def fit(self, X, y=None): """ Compute number of output features. """ n_samples, n_features = check_array(X).shape combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) self.n_input_features_ = n_features self.n_output_features_ = sum(1 for _ in combinations) return self def transform(self, X, y=None): """Transform data to polynomial features Parameters ---------- X : array-like, shape [n_samples, n_features] The data to transform, row by row. Returns ------- XP : np.ndarray shape [n_samples, NP] The matrix of features, where NP is the number of polynomial features generated from the combination of inputs. """ check_is_fitted(self, ['n_input_features_', 'n_output_features_']) X = check_array(X, dtype=FLOAT_DTYPES) n_samples, n_features = X.shape if n_features != self.n_input_features_: raise ValueError("X shape does not match training shape") # allocate output data XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype) combinations = self._combinations(n_features, self.degree, self.interaction_only, self.include_bias) for i, c in enumerate(combinations): XP[:, i] = X[:, c].prod(1) return XP def normalize(X, norm='l2', axis=1, copy=True, return_norm=False): """Scale input vectors individually to unit norm (vector length). Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample (or each non-zero feature if axis is 0). axis : 0 or 1, optional (1 by default) axis used to normalize the data along. If 1, independently normalize each sample, otherwise (if 0) normalize each feature. 
copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix and if axis is 1). return_norm : boolean, default False whether to return the computed norms See also -------- Normalizer: Performs normalization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ if norm not in ('l1', 'l2', 'max'): raise ValueError("'%s' is not a supported norm" % norm) if axis == 0: sparse_format = 'csc' elif axis == 1: sparse_format = 'csr' else: raise ValueError("'%d' is not a supported axis" % axis) X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True, estimator='the normalize function', dtype=FLOAT_DTYPES) if axis == 0: X = X.T if sparse.issparse(X): if norm == 'l1': inplace_csr_row_normalize_l1(X) elif norm == 'l2': inplace_csr_row_normalize_l2(X) elif norm == 'max': _, norms = min_max_axis(X, 1) norms = norms.repeat(np.diff(X.indptr)) mask = norms != 0 X.data[mask] /= norms[mask] else: if norm == 'l1': norms = np.abs(X).sum(axis=1) elif norm == 'l2': norms = row_norms(X) elif norm == 'max': norms = np.max(X, axis=1) norms = _handle_zeros_in_scale(norms, copy=False) X /= norms[:, np.newaxis] if axis == 0: X = X.T if return_norm: return X, norms else: return X class Normalizer(BaseEstimator, TransformerMixin): """Normalize samples individually to unit norm. Each sample (i.e. each row of the data matrix) with at least one non zero component is rescaled independently of other samples so that its norm (l1 or l2) equals one. This transformer is able to work both with dense numpy arrays and scipy.sparse matrix (use CSR format if you want to avoid the burden of a copy / conversion). Scaling inputs to unit norms is a common operation for text classification or clustering for instance. For instance the dot product of two l2-normalized TF-IDF vectors is the cosine similarity of the vectors and is the base similarity metric for the Vector Space Model commonly used by the Information Retrieval community. Read more in the :ref:`User Guide <preprocessing_normalization>`. Parameters ---------- norm : 'l1', 'l2', or 'max', optional ('l2' by default) The norm to use to normalize each non zero sample. copy : boolean, optional, default True set to False to perform inplace row normalization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- normalize: Equivalent function without the object oriented API. """ def __init__(self, norm='l2', copy=True): self.norm = norm self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ X = check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Scale each non zero row of X to unit norm Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to normalize, row by row. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. 
""" copy = copy if copy is not None else self.copy X = check_array(X, accept_sparse='csr') return normalize(X, norm=self.norm, axis=1, copy=copy) def binarize(X, threshold=0.0, copy=True): """Boolean thresholding of array-like or scipy.sparse matrix Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR or CSC format to avoid an un-necessary copy. threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR / CSC matrix and if axis is 1). See also -------- Binarizer: Performs binarization using the ``Transformer`` API (e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`). """ X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy) if sparse.issparse(X): if threshold < 0: raise ValueError('Cannot binarize a sparse matrix with threshold ' '< 0') cond = X.data > threshold not_cond = np.logical_not(cond) X.data[cond] = 1 X.data[not_cond] = 0 X.eliminate_zeros() else: cond = X > threshold not_cond = np.logical_not(cond) X[cond] = 1 X[not_cond] = 0 return X class Binarizer(BaseEstimator, TransformerMixin): """Binarize data (set feature values to 0 or 1) according to a threshold Values greater than the threshold map to 1, while values less than or equal to the threshold map to 0. With the default threshold of 0, only positive values map to 1. Binarization is a common operation on text count data where the analyst can decide to only consider the presence or absence of a feature rather than a quantified number of occurrences for instance. It can also be used as a pre-processing step for estimators that consider boolean random variables (e.g. modelled using the Bernoulli distribution in a Bayesian setting). Read more in the :ref:`User Guide <preprocessing_binarization>`. Parameters ---------- threshold : float, optional (0.0 by default) Feature values below or equal to this are replaced by 0, above it by 1. Threshold may not be less than 0 for operations on sparse matrices. copy : boolean, optional, default True set to False to perform inplace binarization and avoid a copy (if the input is already a numpy array or a scipy.sparse CSR matrix). Notes ----- If the input is a sparse matrix, only the non-zero values are subject to update by the Binarizer class. This estimator is stateless (besides constructor parameters), the fit method does nothing but is useful when used in a pipeline. See also -------- binarize: Equivalent function without the object oriented API. """ def __init__(self, threshold=0.0, copy=True): self.threshold = threshold self.copy = copy def fit(self, X, y=None): """Do nothing and return the estimator unchanged This method is just there to implement the usual API and hence work in pipelines. """ check_array(X, accept_sparse='csr') return self def transform(self, X, y=None, copy=None): """Binarize each element of X Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] The data to binarize, element by element. scipy.sparse matrices should be in CSR format to avoid an un-necessary copy. 
""" copy = copy if copy is not None else self.copy return binarize(X, threshold=self.threshold, copy=copy) class KernelCenterer(BaseEstimator, TransformerMixin): """Center a kernel matrix Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a function mapping x to a Hilbert space. KernelCenterer centers (i.e., normalize to have zero mean) the data without explicitly computing phi(x). It is equivalent to centering phi(x) with sklearn.preprocessing.StandardScaler(with_std=False). Read more in the :ref:`User Guide <kernel_centering>`. """ def fit(self, K, y=None): """Fit KernelCenterer Parameters ---------- K : numpy array of shape [n_samples, n_samples] Kernel matrix. Returns ------- self : returns an instance of self. """ K = check_array(K, dtype=FLOAT_DTYPES) n_samples = K.shape[0] self.K_fit_rows_ = np.sum(K, axis=0) / n_samples self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples return self def transform(self, K, y=None, copy=True): """Center kernel matrix. Parameters ---------- K : numpy array of shape [n_samples1, n_samples2] Kernel matrix. copy : boolean, optional, default True Set to False to perform inplace computation. Returns ------- K_new : numpy array of shape [n_samples1, n_samples2] """ check_is_fitted(self, 'K_fit_all_') K = check_array(K, copy=copy, dtype=FLOAT_DTYPES) K_pred_cols = (np.sum(K, axis=1) / self.K_fit_rows_.shape[0])[:, np.newaxis] K -= self.K_fit_rows_ K -= K_pred_cols K += self.K_fit_all_ return K @property def _pairwise(self): return True def add_dummy_feature(X, value=1.0): """Augment dataset with an additional dummy feature. This is useful for fitting an intercept term with implementations which cannot otherwise fit it directly. Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Data. value : float Value to use for the dummy feature. Returns ------- X : {array, sparse matrix}, shape [n_samples, n_features + 1] Same data with dummy feature added as first column. Examples -------- >>> from sklearn.preprocessing import add_dummy_feature >>> add_dummy_feature([[0, 1], [1, 0]]) array([[ 1., 0., 1.], [ 1., 1., 0.]]) """ X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES) n_samples, n_features = X.shape shape = (n_samples, n_features + 1) if sparse.issparse(X): if sparse.isspmatrix_coo(X): # Shift columns to the right. col = X.col + 1 # Column indices of dummy feature are 0 everywhere. col = np.concatenate((np.zeros(n_samples), col)) # Row indices of dummy feature are 0, ..., n_samples-1. row = np.concatenate((np.arange(n_samples), X.row)) # Prepend the dummy feature n_samples times. data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.coo_matrix((data, (row, col)), shape) elif sparse.isspmatrix_csc(X): # Shift index pointers since we need to add n_samples elements. indptr = X.indptr + n_samples # indptr[0] must be 0. indptr = np.concatenate((np.array([0]), indptr)) # Row indices of dummy feature are 0, ..., n_samples-1. indices = np.concatenate((np.arange(n_samples), X.indices)) # Prepend the dummy feature n_samples times. 
data = np.concatenate((np.ones(n_samples) * value, X.data)) return sparse.csc_matrix((data, indices, indptr), shape) else: klass = X.__class__ return klass(add_dummy_feature(X.tocoo(), value)) else: return np.hstack((np.ones((n_samples, 1)) * value, X)) def _transform_selected(X, transform, selected="all", copy=True): """Apply a transform function to portion of selected features Parameters ---------- X : {array-like, sparse matrix}, shape [n_samples, n_features] Dense array or sparse matrix. transform : callable A callable transform(X) -> X_transformed copy : boolean, optional Copy X even if it could be avoided. selected: "all" or array of indices or mask Specify which features to apply the transform to. Returns ------- X : array or sparse matrix, shape=(n_samples, n_features_new) """ X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES) if isinstance(selected, six.string_types) and selected == "all": return transform(X) if len(selected) == 0: return X n_features = X.shape[1] ind = np.arange(n_features) sel = np.zeros(n_features, dtype=bool) sel[np.asarray(selected)] = True not_sel = np.logical_not(sel) n_selected = np.sum(sel) if n_selected == 0: # No features selected. return X elif n_selected == n_features: # All features selected. return transform(X) else: X_sel = transform(X[:, ind[sel]]) X_not_sel = X[:, ind[not_sel]] if sparse.issparse(X_sel) or sparse.issparse(X_not_sel): return sparse.hstack((X_sel, X_not_sel)) else: return np.hstack((X_sel, X_not_sel)) class OneHotEncoder(BaseEstimator, TransformerMixin): """Encode categorical integer features using a one-hot aka one-of-K scheme. The input to this transformer should be a matrix of integers, denoting the values taken on by categorical (discrete) features. The output will be a sparse matrix where each column corresponds to one possible value of one feature. It is assumed that input features take on values in the range [0, n_values). This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels. Note: a one-hot encoding of y labels should use a LabelBinarizer instead. Read more in the :ref:`User Guide <preprocessing_categorical_features>`. Parameters ---------- n_values : 'auto', int or array of ints Number of values per feature. - 'auto' : determine value range from training data. - int : number of categorical values per feature. Each feature value should be in ``range(n_values)`` - array : ``n_values[i]`` is the number of categorical values in ``X[:, i]``. Each feature value should be in ``range(n_values[i])`` categorical_features : "all" or array of indices or mask Specify what features are treated as categorical. - 'all' (default): All features are treated as categorical. - array of indices: Array of categorical feature indices. - mask: Array of length n_features and with dtype=bool. Non-categorical features are always stacked to the right of the matrix. dtype : number type, default=np.float Desired dtype of output. sparse : boolean, default=True Will return sparse matrix if set True else will return an array. handle_unknown : str, 'error' or 'ignore' Whether to raise an error or ignore if a unknown categorical feature is present during transform. Attributes ---------- active_features_ : array Indices for active features, meaning values that actually occur in the training set. Only available when n_values is ``'auto'``. feature_indices_ : array of shape (n_features,) Indices to feature ranges. 
        Feature ``i`` in the original data is mapped to features
        from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
        (and then potentially masked by `active_features_` afterwards)

    n_values_ : array of shape (n_features,)
        Maximum number of values per feature.

    Examples
    --------
    Given a dataset with three features and four samples, we let the encoder
    find the maximum value per feature and transform the data to a binary
    one-hot encoding.

    >>> from sklearn.preprocessing import OneHotEncoder
    >>> enc = OneHotEncoder()
    >>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]])  # doctest: +ELLIPSIS
    OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
           handle_unknown='error', n_values='auto', sparse=True)
    >>> enc.n_values_
    array([2, 3, 4])
    >>> enc.feature_indices_
    array([0, 2, 5, 9])
    >>> enc.transform([[0, 1, 1]]).toarray()
    array([[ 1.,  0.,  0.,  1.,  0.,  0.,  1.,  0.,  0.]])

    See also
    --------
    sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
      dictionary items (also handles string-valued features).
    sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
      encoding of dictionary items or strings.
    sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
      fashion.
    sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
      iterables and a multilabel format, e.g. a (samples x classes) binary
      matrix indicating the presence of a class label.
    sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
      and n_classes-1.
    """
    def __init__(self, n_values="auto", categorical_features="all",
                 dtype=np.float64, sparse=True, handle_unknown='error'):
        self.n_values = n_values
        self.categorical_features = categorical_features
        self.dtype = dtype
        self.sparse = sparse
        self.handle_unknown = handle_unknown

    def fit(self, X, y=None):
        """Fit OneHotEncoder to X.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input array of type int.

        Returns
        -------
        self
        """
        self.fit_transform(X)
        return self

    def _fit_transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            n_values = np.max(X, axis=0) + 1
        elif isinstance(self.n_values, numbers.Integral):
            if (np.max(X, axis=0) >= self.n_values).any():
                raise ValueError("Feature out of bounds for n_values=%d"
                                 % self.n_values)
            n_values = np.empty(n_features, dtype=np.int)
            n_values.fill(self.n_values)
        else:
            try:
                n_values = np.asarray(self.n_values, dtype=int)
            except (ValueError, TypeError):
                raise TypeError("Wrong type for parameter `n_values`. Expected"
                                " 'auto', int or array of ints, got %r"
                                % type(self.n_values))
        if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
            raise ValueError("Shape mismatch: if n_values is an array,"
                             " it has to be of shape (n_features,).")

        self.n_values_ = n_values
        n_values = np.hstack([[0], n_values])
        indices = np.cumsum(n_values)
        self.feature_indices_ = indices

        column_indices = (X + indices[:-1]).ravel()
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)
        data = np.ones(n_samples * n_features)
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()

        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            mask = np.array(out.sum(axis=0)).ravel() != 0
            active_features = np.where(mask)[0]
            out = out[:, active_features]
            self.active_features_ = active_features

        return out if self.sparse else out.toarray()

    def fit_transform(self, X, y=None):
        """Fit OneHotEncoder to X, then transform X.

        Equivalent to self.fit(X).transform(X), but more convenient and more
        efficient. See fit for the parameters, transform for the return value.
        """
        return _transform_selected(X, self._fit_transform,
                                   self.categorical_features, copy=True)

    def _transform(self, X):
        """Assumes X contains only categorical features."""
        X = check_array(X, dtype=np.int)
        if np.any(X < 0):
            raise ValueError("X needs to contain only non-negative integers.")
        n_samples, n_features = X.shape

        indices = self.feature_indices_
        if n_features != indices.shape[0] - 1:
            raise ValueError("X has different shape than during fitting."
                             " Expected %d, got %d."
                             % (indices.shape[0] - 1, n_features))

        # We use only those categorical features of X that are known at fit
        # time, i.e. strictly less than n_values_, via the mask below.
        # This means that if self.handle_unknown is "ignore", the row_indices
        # and col_indices corresponding to unknown categorical features are
        # dropped.
        mask = (X < self.n_values_).ravel()
        if np.any(~mask):
            if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
            if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature present %s "
                                 "during transform." % X.ravel()[~mask])

        column_indices = (X + indices[:-1]).ravel()[mask]
        row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
                                n_features)[mask]
        data = np.ones(np.sum(mask))
        out = sparse.coo_matrix((data, (row_indices, column_indices)),
                                shape=(n_samples, indices[-1]),
                                dtype=self.dtype).tocsr()
        if (isinstance(self.n_values, six.string_types) and
                self.n_values == 'auto'):
            out = out[:, self.active_features_]

        return out if self.sparse else out.toarray()

    def transform(self, X):
        """Transform X using one-hot encoding.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            Input array of type int.

        Returns
        -------
        X_out : sparse matrix if sparse=True else a 2-d array
            Transformed input.
        """
        return _transform_selected(X, self._transform,
                                   self.categorical_features, copy=True)
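# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative, not part of the library source). It
# shows how OneHotEncoder composes with _transform_selected: only the column
# listed in ``categorical_features`` is one-hot encoded, and the remaining
# (already numeric) column is stacked to the right. The data below is made up
# for illustration.
if __name__ == "__main__":
    import numpy as np

    X_demo = np.array([[0, 10.],
                       [1, 20.],
                       [2, 10.]])
    enc = OneHotEncoder(categorical_features=[0], sparse=False)
    X_enc = enc.fit_transform(X_demo)
    # Columns 0-2 one-hot encode the values {0, 1, 2} of the first feature;
    # the untouched second feature is appended as the last column, i.e. the
    # expected result is:
    # [[  1.   0.   0.  10.]
    #  [  0.   1.   0.  20.]
    #  [  0.   0.   1.  10.]]
    print(X_enc)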
bsd-3-clause
jm-begon/scikit-learn
examples/cluster/plot_kmeans_silhouette_analysis.py
242
5885
""" =============================================================================== Selecting the number of clusters with silhouette analysis on KMeans clustering =============================================================================== Silhouette analysis can be used to study the separation distance between the resulting clusters. The silhouette plot displays a measure of how close each point in one cluster is to points in the neighboring clusters and thus provides a way to assess parameters like number of clusters visually. This measure has a range of [-1, 1]. Silhoette coefficients (as these values are referred to as) near +1 indicate that the sample is far away from the neighboring clusters. A value of 0 indicates that the sample is on or very close to the decision boundary between two neighboring clusters and negative values indicate that those samples might have been assigned to the wrong cluster. In this example the silhouette analysis is used to choose an optimal value for ``n_clusters``. The silhouette plot shows that the ``n_clusters`` value of 3, 5 and 6 are a bad pick for the given data due to the presence of clusters with below average silhouette scores and also due to wide fluctuations in the size of the silhouette plots. Silhouette analysis is more ambivalent in deciding between 2 and 4. Also from the thickness of the silhouette plot the cluster size can be visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to 2, is bigger in size owing to the grouping of the 3 sub clusters into one big cluster. However when the ``n_clusters`` is equal to 4, all the plots are more or less of similar thickness and hence are of similar sizes as can be also verified from the labelled scatter plot on the right. """ from __future__ import print_function from sklearn.datasets import make_blobs from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score import matplotlib.pyplot as plt import matplotlib.cm as cm import numpy as np print(__doc__) # Generating the sample data from make_blobs # This particular setting has one distict cluster and 3 clusters placed close # together. X, y = make_blobs(n_samples=500, n_features=2, centers=4, cluster_std=1, center_box=(-10.0, 10.0), shuffle=True, random_state=1) # For reproducibility range_n_clusters = [2, 3, 4, 5, 6] for n_clusters in range_n_clusters: # Create a subplot with 1 row and 2 columns fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 7) # The 1st subplot is the silhouette plot # The silhouette coefficient can range from -1, 1 but in this example all # lie within [-0.1, 1] ax1.set_xlim([-0.1, 1]) # The (n_clusters+1)*10 is for inserting blank space between silhouette # plots of individual clusters, to demarcate them clearly. ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10]) # Initialize the clusterer with n_clusters value and a random generator # seed of 10 for reproducibility. clusterer = KMeans(n_clusters=n_clusters, random_state=10) cluster_labels = clusterer.fit_predict(X) # The silhouette_score gives the average value for all the samples. 
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(X, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", silhouette_avg)

    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(X, cluster_labels)

    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]

        ith_cluster_silhouette_values.sort()

        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i

        color = cm.spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)

        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))

        # Compute the new y_lower for the next plot, leaving 10 blank units
        # between consecutive silhouette plots
        y_lower = y_upper + 10

    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")

    # The vertical line for the average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")

    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])

    # 2nd Plot showing the actual clusters formed
    colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
    ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
                c=colors)

    # Labeling the clusters
    centers = clusterer.cluster_centers_
    # Draw white circles at cluster centers
    ax2.scatter(centers[:, 0], centers[:, 1],
                marker='o', c="white", alpha=1, s=200)

    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)

    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")

    plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')

    plt.show()
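# ---------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original example): the visual
# analysis above can be condensed into a purely numerical model-selection loop
# that picks the ``n_clusters`` with the highest mean silhouette score. It
# reuses the same X, KMeans and silhouette_score already in scope.
scores = {}
for k in range_n_clusters:
    labels = KMeans(n_clusters=k, random_state=10).fit_predict(X)
    scores[k] = silhouette_score(X, labels)
best_k = max(scores, key=scores.get)
print("Mean silhouette score by n_clusters:", scores)
print("Best n_clusters by mean silhouette score:", best_k)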
bsd-3-clause